Diffstat (limited to 'drivers/net')
781 files changed, 70318 insertions, 47780 deletions
diff --git a/drivers/net/Space.c b/drivers/net/Space.c index e3f0faca98d0..3a8c7532ee0d 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -37,35 +37,14 @@ ethernet adaptor have the name "eth[0123...]". */ -extern struct net_device *ne2_probe(int unit); extern struct net_device *hp100_probe(int unit); extern struct net_device *ultra_probe(int unit); -extern struct net_device *ultra32_probe(int unit); extern struct net_device *wd_probe(int unit); -extern struct net_device *el2_probe(int unit); extern struct net_device *ne_probe(int unit); -extern struct net_device *hp_probe(int unit); -extern struct net_device *hp_plus_probe(int unit); -extern struct net_device *express_probe(int unit); -extern struct net_device *eepro_probe(int unit); -extern struct net_device *at1700_probe(int unit); extern struct net_device *fmv18x_probe(int unit); -extern struct net_device *eth16i_probe(int unit); extern struct net_device *i82596_probe(int unit); -extern struct net_device *ewrk3_probe(int unit); -extern struct net_device *el1_probe(int unit); -extern struct net_device *el16_probe(int unit); -extern struct net_device *elmc_probe(int unit); -extern struct net_device *elplus_probe(int unit); -extern struct net_device *ac3200_probe(int unit); -extern struct net_device *es_probe(int unit); -extern struct net_device *lne390_probe(int unit); -extern struct net_device *e2100_probe(int unit); -extern struct net_device *ni5010_probe(int unit); -extern struct net_device *ni52_probe(int unit); extern struct net_device *ni65_probe(int unit); extern struct net_device *sonic_probe(int unit); -extern struct net_device *seeq8005_probe(int unit); extern struct net_device *smc_init(int unit); extern struct net_device *atarilance_probe(int unit); extern struct net_device *sun3lance_probe(int unit); @@ -77,13 +56,9 @@ extern struct net_device *tc515_probe(int unit); extern struct net_device *lance_probe(int unit); extern struct net_device *mac8390_probe(int unit); extern struct net_device *mac89x0_probe(int unit); -extern struct net_device *mc32_probe(int unit); extern struct net_device *cops_probe(int unit); extern struct net_device *ltpc_probe(void); -/* Detachable devices ("pocket adaptors") */ -extern struct net_device *de620_probe(int unit); - /* Fibre Channel adapters */ extern int iph5526_probe(struct net_device *dev); @@ -111,29 +86,6 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe) } /* - * This is a bit of an artificial separation as there are PCI drivers - * that also probe for EISA cards (in the PCI group) and there are ISA - * drivers that probe for EISA cards (in the ISA group). These are the - * legacy EISA only driver probes, and also the legacy PCI probes - */ - -static struct devprobe2 eisa_probes[] __initdata = { -#ifdef CONFIG_ULTRA32 - {ultra32_probe, 0}, -#endif -#ifdef CONFIG_AC3200 - {ac3200_probe, 0}, -#endif -#ifdef CONFIG_ES3210 - {es_probe, 0}, -#endif -#ifdef CONFIG_LNE390 - {lne390_probe, 0}, -#endif - {NULL, 0}, -}; - -/* * ISA probes that touch addresses < 0x400 (including those that also * look for EISA/PCI cards in addition to ISA cards). */ @@ -150,18 +102,6 @@ static struct devprobe2 isa_probes[] __initdata = { #ifdef CONFIG_WD80x3 {wd_probe, 0}, #endif -#ifdef CONFIG_EL2 /* 3c503 */ - {el2_probe, 0}, -#endif -#ifdef CONFIG_HPLAN - {hp_probe, 0}, -#endif -#ifdef CONFIG_HPLAN_PLUS - {hp_plus_probe, 0}, -#endif -#ifdef CONFIG_E2100 /* Cabletron E21xx series. 
*/ - {e2100_probe, 0}, -#endif #if defined(CONFIG_NE2000) || \ defined(CONFIG_NE_H8300) /* ISA (use ne2k-pci for PCI cards) */ {ne_probe, 0}, @@ -172,60 +112,20 @@ static struct devprobe2 isa_probes[] __initdata = { #ifdef CONFIG_SMC9194 {smc_init, 0}, #endif -#ifdef CONFIG_SEEQ8005 - {seeq8005_probe, 0}, -#endif #ifdef CONFIG_CS89x0 #ifndef CONFIG_CS89x0_PLATFORM {cs89x0_probe, 0}, #endif #endif -#ifdef CONFIG_AT1700 - {at1700_probe, 0}, -#endif -#ifdef CONFIG_ETH16I - {eth16i_probe, 0}, /* ICL EtherTeam 16i/32 */ -#endif -#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */ - {express_probe, 0}, -#endif -#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */ - {eepro_probe, 0}, -#endif -#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */ - {ewrk3_probe, 0}, -#endif -#if defined(CONFIG_APRICOT) || defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */ +#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */ {i82596_probe, 0}, #endif -#ifdef CONFIG_EL1 /* 3c501 */ - {el1_probe, 0}, -#endif -#ifdef CONFIG_EL16 /* 3c507 */ - {el16_probe, 0}, -#endif -#ifdef CONFIG_ELPLUS /* 3c505 */ - {elplus_probe, 0}, -#endif -#ifdef CONFIG_NI5010 - {ni5010_probe, 0}, -#endif -#ifdef CONFIG_NI52 - {ni52_probe, 0}, -#endif #ifdef CONFIG_NI65 {ni65_probe, 0}, #endif {NULL, 0}, }; -static struct devprobe2 parport_probes[] __initdata = { -#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */ - {de620_probe, 0}, -#endif - {NULL, 0}, -}; - static struct devprobe2 m68k_probes[] __initdata = { #ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */ {atarilance_probe, 0}, @@ -264,9 +164,7 @@ static void __init ethif_probe2(int unit) return; (void)( probe_list2(unit, m68k_probes, base_addr == 0) && - probe_list2(unit, eisa_probes, base_addr == 0) && - probe_list2(unit, isa_probes, base_addr == 0) && - probe_list2(unit, parport_probes, base_addr == 0)); + probe_list2(unit, isa_probes, base_addr == 0)); } /* Statically configured drivers -- order matters here. 
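To make the net effect of the Space.c cleanup easier to follow, here is the probe routine reassembled as it reads after the patch. The body is taken from the hunk above; the base_addr lookup is unchanged context from Space.c and is shown only for orientation.

	static void __init ethif_probe2(int unit)
	{
		unsigned long base_addr = netdev_boot_base("eth", unit);

		if (base_addr == 1)
			return;

		/* Only the m68k and ISA tables survive; the EISA table and
		 * the parport ("pocket adaptor") table were removed together
		 * with their drivers.
		 */
		(void)(probe_list2(unit, m68k_probes, base_addr == 0) &&
		       probe_list2(unit, isa_probes, base_addr == 0));
	}
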
*/ diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a030e635f001..fc58d118d844 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -389,13 +389,13 @@ static u8 __get_duplex(struct port *port) /** * __initialize_port_locks - initialize a port's STATE machine spinlock - * @port: the port we're looking at + * @port: the slave of the port we're looking at * */ -static inline void __initialize_port_locks(struct port *port) +static inline void __initialize_port_locks(struct slave *slave) { // make sure it isn't called twice - spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); + spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock)); } //conversions @@ -1127,7 +1127,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) // INFO_RECEIVED_LOOPBACK_FRAMES pr_err("%s: An illegal loopback occurred on adapter (%s).\n" "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", - port->slave->dev->master->name, port->slave->dev->name); + port->slave->bond->dev->name, port->slave->dev->name); return; } __update_selected(lacpdu, port); @@ -1306,7 +1306,7 @@ static void ad_port_selection_logic(struct port *port) } if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n", - port->slave->dev->master->name, + port->slave->bond->dev->name, port->actor_port_number, port->slave->dev->name, port->aggregator->aggregator_identifier); @@ -1386,7 +1386,7 @@ static void ad_port_selection_logic(struct port *port) port->aggregator->aggregator_identifier); } else { pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n", - port->slave->dev->master->name, + port->slave->bond->dev->name, port->actor_port_number, port->slave->dev->name); } } @@ -1463,7 +1463,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, default: pr_warning("%s: Impossible agg select mode %d\n", - curr->slave->dev->master->name, + curr->slave->bond->dev->name, __get_agg_selection_mode(curr->lag_ports)); break; } @@ -1571,7 +1571,7 @@ static void ad_agg_selection_logic(struct aggregator *agg) // check if any partner replys if (best->is_individual) { pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", - best->slave ? best->slave->dev->master->name : "NULL"); + best->slave ? 
best->slave->bond->dev->name : "NULL"); } best->is_active = 1; @@ -1898,7 +1898,7 @@ int bond_3ad_bind_slave(struct slave *slave) if (bond == NULL) { pr_err("%s: The slave %s is not attached to its bond\n", - slave->dev->master->name, slave->dev->name); + slave->bond->dev->name, slave->dev->name); return -1; } @@ -1910,6 +1910,7 @@ int bond_3ad_bind_slave(struct slave *slave) ad_initialize_port(port, bond->params.lacp_fast); + __initialize_port_locks(slave); port->slave = slave; port->actor_port_number = SLAVE_AD_INFO(slave).id; // key is determined according to the link speed, duplex and user key(which is yet not supported) @@ -1932,8 +1933,6 @@ int bond_3ad_bind_slave(struct slave *slave) port->next_port_in_aggregator = NULL; __disable_port(port); - __initialize_port_locks(port); - // aggregator initialization aggregator = &(SLAVE_AD_INFO(slave).aggregator); @@ -1973,7 +1972,7 @@ void bond_3ad_unbind_slave(struct slave *slave) // if slave is null, the whole port is not initialized if (!port->slave) { pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n", - slave->dev->master->name, slave->dev->name); + slave->bond->dev->name, slave->dev->name); return; } @@ -2009,7 +2008,7 @@ void bond_3ad_unbind_slave(struct slave *slave) if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) { pr_info("%s: Removing an active aggregator\n", - aggregator->slave->dev->master->name); + aggregator->slave->bond->dev->name); // select new active aggregator select_new_active_agg = 1; } @@ -2040,7 +2039,7 @@ void bond_3ad_unbind_slave(struct slave *slave) ad_agg_selection_logic(__get_first_agg(port)); } else { pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n", - slave->dev->master->name); + slave->bond->dev->name); } } else { // in case that the only port related to this aggregator is the one we want to remove select_new_active_agg = aggregator->is_active; @@ -2048,7 +2047,7 @@ void bond_3ad_unbind_slave(struct slave *slave) ad_clear_agg(aggregator); if (select_new_active_agg) { pr_info("%s: Removing an active aggregator\n", - slave->dev->master->name); + slave->bond->dev->name); // select new active aggregator ad_agg_selection_logic(__get_first_agg(port)); } @@ -2076,7 +2075,7 @@ void bond_3ad_unbind_slave(struct slave *slave) ad_clear_agg(temp_aggregator); if (select_new_active_agg) { pr_info("%s: Removing an active aggregator\n", - slave->dev->master->name); + slave->bond->dev->name); // select new active aggregator ad_agg_selection_logic(__get_first_agg(port)); } @@ -2184,7 +2183,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1 if (!port->slave) { pr_warning("%s: Warning: port of slave %s is uninitialized\n", - slave->dev->name, slave->dev->master->name); + slave->dev->name, slave->bond->dev->name); return ret; } @@ -2240,7 +2239,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) // if slave is null, the whole port is not initialized if (!port->slave) { pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", - slave->dev->master->name, slave->dev->name); + slave->bond->dev->name, slave->dev->name); return; } @@ -2268,7 +2267,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) // if slave is null, the whole port is not initialized if (!port->slave) { pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", - slave->dev->master->name, slave->dev->name); + slave->bond->dev->name, slave->dev->name); return; } @@ -2297,7 +2296,7 @@ void 
bond_3ad_handle_link_change(struct slave *slave, char link) // if slave is null, the whole port is not initialized if (!port->slave) { pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", - slave->dev->master->name, slave->dev->name); + slave->bond->dev->name, slave->dev->name); return; } @@ -2494,11 +2493,13 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) struct port *port = NULL; int lacp_fast; - read_lock(&bond->lock); + write_lock_bh(&bond->lock); lacp_fast = bond->params.lacp_fast; bond_for_each_slave(bond, slave, i) { port = &(SLAVE_AD_INFO(slave).port); + if (port->slave == NULL) + continue; __get_state_machine_lock(port); if (lacp_fast) port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; @@ -2507,5 +2508,5 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) __release_state_machine_lock(port); } - read_unlock(&bond->lock); + write_unlock_bh(&bond->lock); } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7c9d136e74be..f5e052723029 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -507,7 +507,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) client_info->mac_dst); if (!skb) { pr_err("%s: Error: failed to create an ARP packet\n", - client_info->slave->dev->master->name); + client_info->slave->bond->dev->name); continue; } @@ -517,7 +517,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) skb = vlan_put_tag(skb, client_info->vlan_id); if (!skb) { pr_err("%s: Error: failed to insert VLAN tag\n", - client_info->slave->dev->master->name); + client_info->slave->bond->dev->name); continue; } } @@ -1043,7 +1043,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) if (dev_set_mac_address(dev, &s_addr)) { pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n" "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n", - dev->master->name, dev->name); + slave->bond->dev->name, dev->name); return -EOPNOTSUPP; } return 0; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b7d45f367d4a..11d01d67b3f5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -746,11 +746,9 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev) { struct in_device *in_dev; - rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) ip_mc_rejoin_groups(in_dev); - rcu_read_unlock(); } /* @@ -760,9 +758,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev) */ static void bond_resend_igmp_join_requests(struct bonding *bond) { - struct net_device *bond_dev, *vlan_dev, *master_dev; + struct net_device *bond_dev, *vlan_dev, *upper_dev; struct vlan_entry *vlan; + rcu_read_lock(); read_lock(&bond->lock); bond_dev = bond->dev; @@ -774,18 +773,14 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) * if bond is enslaved to a bridge, * then rejoin all groups on its master */ - master_dev = bond_dev->master; - if (master_dev) - if ((master_dev->priv_flags & IFF_EBRIDGE) - && (bond_dev->priv_flags & IFF_BRIDGE_PORT)) - __bond_resend_igmp_join_requests(master_dev); + upper_dev = netdev_master_upper_dev_get_rcu(bond_dev); + if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE) + __bond_resend_igmp_join_requests(upper_dev); /* rejoin all groups on vlan devices */ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - rcu_read_lock(); vlan_dev = __vlan_find_dev_deep(bond_dev, 
vlan->vlan_id); - rcu_read_unlock(); if (vlan_dev) __bond_resend_igmp_join_requests(vlan_dev); } @@ -794,13 +789,16 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); read_unlock(&bond->lock); + rcu_read_unlock(); } static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, mcast_work.work); + rcu_read_lock(); bond_resend_igmp_join_requests(bond); + rcu_read_unlock(); } /* @@ -1251,7 +1249,7 @@ static inline void slave_disable_netpoll(struct slave *slave) return; slave->np = NULL; - __netpoll_free_rcu(np); + __netpoll_free_async(np); } static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) { @@ -1322,14 +1320,15 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) /*---------------------------------- IOCTL ----------------------------------*/ -static int bond_sethwaddr(struct net_device *bond_dev, - struct net_device *slave_dev) +static void bond_set_dev_addr(struct net_device *bond_dev, + struct net_device *slave_dev) { pr_debug("bond_dev=%p\n", bond_dev); pr_debug("slave_dev=%p\n", slave_dev); pr_debug("slave_dev->addr_len=%d\n", slave_dev->addr_len); memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len); - return 0; + bond_dev->addr_assign_type = NET_ADDR_SET; + call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); } static netdev_features_t bond_fix_features(struct net_device *dev, @@ -1493,6 +1492,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) return ret; } +static int bond_master_upper_dev_link(struct net_device *bond_dev, + struct net_device *slave_dev) +{ + int err; + + err = netdev_master_upper_dev_link(slave_dev, bond_dev); + if (err) + return err; + slave_dev->flags |= IFF_SLAVE; + rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE); + return 0; +} + +static void bond_upper_dev_unlink(struct net_device *bond_dev, + struct net_device *slave_dev) +{ + netdev_upper_dev_unlink(slave_dev, bond_dev); + slave_dev->flags &= ~IFF_SLAVE; + rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE); +} + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -1609,10 +1629,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* If this is the first slave, then we need to set the master's hardware * address to be the same as the slave's. 
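The comment above refers to the patch's new address-inheritance scheme. Reassembled from the bond_main.c hunks (with the pr_debug lines trimmed), the helper and the places that trigger it are:

	static void bond_set_dev_addr(struct net_device *bond_dev,
				      struct net_device *slave_dev)
	{
		memcpy(bond_dev->dev_addr, slave_dev->dev_addr,
		       slave_dev->addr_len);
		bond_dev->addr_assign_type = NET_ADDR_SET;
		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	}

	/* bond_enslave() calls this for the first slave when
	 * bond->dev_addr_from_first is set; __bond_release_one()
	 * re-randomizes the bond's address with eth_hw_addr_random() and
	 * re-arms dev_addr_from_first once the last slave is gone; and
	 * bond_init() does the same for a freshly created bond whose
	 * address is still the all-zero NET_ADDR_PERM default.
	 */
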
*/ - if (is_zero_ether_addr(bond->dev->dev_addr)) - memcpy(bond->dev->dev_addr, slave_dev->dev_addr, - slave_dev->addr_len); - + if (bond->dev_addr_from_first) + bond_set_dev_addr(bond->dev, slave_dev); new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); if (!new_slave) { @@ -1655,9 +1673,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } } - res = netdev_set_bond_master(slave_dev, bond_dev); + res = bond_master_upper_dev_link(bond_dev, slave_dev); if (res) { - pr_debug("Error %d calling netdev_set_bond_master\n", res); + pr_debug("Error %d calling bond_master_upper_dev_link\n", res); goto err_restore_mac; } @@ -1891,7 +1909,7 @@ err_close: dev_close(slave_dev); err_unset_master: - netdev_set_bond_master(slave_dev, NULL); + bond_upper_dev_unlink(bond_dev, slave_dev); err_restore_mac: if (!bond->params.fail_over_mac) { @@ -1919,7 +1937,8 @@ err_undo_flags: /* * Try to release the slave device <slave> from the bond device <master> * It is legal to access curr_active_slave without a lock because all the function - * is write-locked. + * is write-locked. If "all" is true it means that the function is being called + * while destroying a bond interface and all slaves are being released. * * The rules for slave state should be: * for Active/Backup: @@ -1927,7 +1946,9 @@ err_undo_flags: * for Bonded connections: * The first up interface should be left on and all others downed. */ -int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) +static int __bond_release_one(struct net_device *bond_dev, + struct net_device *slave_dev, + bool all) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *oldcurrent; @@ -1936,7 +1957,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) /* slave is not a slave or master is not master of this slave */ if (!(slave_dev->flags & IFF_SLAVE) || - (slave_dev->master != bond_dev)) { + !netdev_has_upper_dev(slave_dev, bond_dev)) { pr_err("%s: Error: cannot release %s.\n", bond_dev->name, slave_dev->name); return -EINVAL; @@ -1964,7 +1985,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) synchronize_net(); write_lock_bh(&bond->lock); - if (!bond->params.fail_over_mac) { + if (!all && !bond->params.fail_over_mac) { if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) && bond->slave_cnt > 1) pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. 
Set the HWaddr of %s to a different address to avoid conflicts.\n", @@ -2010,7 +2031,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) write_lock_bh(&bond->lock); } - if (oldcurrent == slave) { + if (all) { + bond->curr_active_slave = NULL; + } else if (oldcurrent == slave) { /* * Note that we hold RTNL over this sequence, so there * is no concern that another slave add/remove event @@ -2029,12 +2052,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) if (bond->slave_cnt == 0) { bond_set_carrier(bond); - - /* if the last slave was removed, zero the mac address - * of the master so it will be set by the application - * to the mac address of the first slave - */ - memset(bond_dev->dev_addr, 0, bond_dev->addr_len); + eth_hw_addr_random(bond_dev); + bond->dev_addr_from_first = true; if (bond_vlan_used(bond)) { pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", @@ -2080,7 +2099,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) netif_addr_unlock_bh(bond_dev); } - netdev_set_bond_master(slave_dev, NULL); + bond_upper_dev_unlink(bond_dev, slave_dev); slave_disable_netpoll(slave); @@ -2103,6 +2122,12 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* deletion OK */ } +/* A wrapper used because of ndo_del_link */ +int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) +{ + return __bond_release_one(bond_dev, slave_dev, false); +} + /* * First release a slave and then destroy the bond if no more slaves are left. * Must be under rtnl_lock when this function is called. @@ -2124,121 +2149,6 @@ static int bond_release_and_destroy(struct net_device *bond_dev, } /* - * This function releases all slaves. - */ -static int bond_release_all(struct net_device *bond_dev) -{ - struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave; - struct net_device *slave_dev; - struct sockaddr addr; - - write_lock_bh(&bond->lock); - - netif_carrier_off(bond_dev); - - if (bond->slave_cnt == 0) - goto out; - - bond->current_arp_slave = NULL; - bond->primary_slave = NULL; - bond_change_active_slave(bond, NULL); - - while ((slave = bond->first_slave) != NULL) { - /* Inform AD package of unbinding of slave - * before slave is detached from the list. - */ - if (bond->params.mode == BOND_MODE_8023AD) - bond_3ad_unbind_slave(slave); - - slave_dev = slave->dev; - bond_detach_slave(bond, slave); - - /* now that the slave is detached, unlock and perform - * all the undo steps that should not be called from - * within a lock. - */ - write_unlock_bh(&bond->lock); - - /* unregister rx_handler early so bond_handle_frame wouldn't - * be called for this slave anymore. 
- */ - netdev_rx_handler_unregister(slave_dev); - synchronize_net(); - - if (bond_is_lb(bond)) { - /* must be called only after the slave - * has been detached from the list - */ - bond_alb_deinit_slave(bond, slave); - } - - bond_destroy_slave_symlinks(bond_dev, slave_dev); - bond_del_vlans_from_slave(bond, slave_dev); - - /* If the mode USES_PRIMARY, then we should only remove its - * promisc and mc settings if it was the curr_active_slave, but that was - * already taken care of above when we detached the slave - */ - if (!USES_PRIMARY(bond->params.mode)) { - /* unset promiscuity level from slave */ - if (bond_dev->flags & IFF_PROMISC) - dev_set_promiscuity(slave_dev, -1); - - /* unset allmulti level from slave */ - if (bond_dev->flags & IFF_ALLMULTI) - dev_set_allmulti(slave_dev, -1); - - /* flush master's mc_list from slave */ - netif_addr_lock_bh(bond_dev); - bond_mc_list_flush(bond_dev, slave_dev); - netif_addr_unlock_bh(bond_dev); - } - - netdev_set_bond_master(slave_dev, NULL); - - slave_disable_netpoll(slave); - - /* close slave before restoring its mac address */ - dev_close(slave_dev); - - if (!bond->params.fail_over_mac) { - /* restore original ("permanent") mac address*/ - memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); - } - - kfree(slave); - - /* re-acquire the lock before getting the next slave */ - write_lock_bh(&bond->lock); - } - - /* zero the mac address of the master so it will be - * set by the application to the mac address of the - * first slave - */ - memset(bond_dev->dev_addr, 0, bond_dev->addr_len); - - if (bond_vlan_used(bond)) { - pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", - bond_dev->name, bond_dev->name); - pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", - bond_dev->name); - } - - pr_info("%s: released all slaves\n", bond_dev->name); - -out: - write_unlock_bh(&bond->lock); - - bond_compute_features(bond); - - return 0; -} - -/* * This function changes the active slave to slave <slave_dev>. * It returns -EINVAL in the following cases. * - <slave_dev> is not found in the list. 
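The large bond_release_all() removal above is compensated by the __bond_release_one() helper added in the hunks around this point; condensed from those hunks, the resulting structure is:

	/* One function now covers both the single-slave case and bond
	 * teardown; "all" is true only while the bond interface itself is
	 * being destroyed.
	 */
	static int __bond_release_one(struct net_device *bond_dev,
				      struct net_device *slave_dev,
				      bool all);

	/* A wrapper used because of ndo_del_link */
	int bond_release(struct net_device *bond_dev,
			 struct net_device *slave_dev)
	{
		return __bond_release_one(bond_dev, slave_dev, false);
	}

	/* excerpt from bond_uninit(), replacing the bond_release_all() call: */
		while (bond->first_slave != NULL)
			__bond_release_one(bond_dev, bond->first_slave->dev, true);
		pr_info("%s: released all slaves\n", bond_dev->name);
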
@@ -2259,8 +2169,9 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi if (!USES_PRIMARY(bond->params.mode)) return -EINVAL; - /* Verify that master_dev is indeed the master of slave_dev */ - if (!(slave_dev->flags & IFF_SLAVE) || (slave_dev->master != bond_dev)) + /* Verify that bond_dev is indeed the master of slave_dev */ + if (!(slave_dev->flags & IFF_SLAVE) || + !netdev_has_upper_dev(slave_dev, bond_dev)) return -EINVAL; read_lock(&bond->lock); @@ -3258,36 +3169,32 @@ static int bond_master_netdev_event(unsigned long event, static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) { - struct net_device *bond_dev = slave_dev->master; - struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave = NULL; + struct slave *slave = bond_slave_get_rtnl(slave_dev); + struct bonding *bond = slave->bond; + struct net_device *bond_dev = slave->bond->dev; + u32 old_speed; + u8 old_duplex; switch (event) { case NETDEV_UNREGISTER: - if (bond_dev) { - if (bond->setup_by_slave) - bond_release_and_destroy(bond_dev, slave_dev); - else - bond_release(bond_dev, slave_dev); - } + if (bond->setup_by_slave) + bond_release_and_destroy(bond_dev, slave_dev); + else + bond_release(bond_dev, slave_dev); break; case NETDEV_UP: case NETDEV_CHANGE: - slave = bond_get_slave_by_dev(bond, slave_dev); - if (slave) { - u32 old_speed = slave->speed; - u8 old_duplex = slave->duplex; + old_speed = slave->speed; + old_duplex = slave->duplex; - bond_update_speed_duplex(slave); + bond_update_speed_duplex(slave); - if (bond->params.mode == BOND_MODE_8023AD) { - if (old_speed != slave->speed) - bond_3ad_adapter_speed_changed(slave); - if (old_duplex != slave->duplex) - bond_3ad_adapter_duplex_changed(slave); - } + if (bond->params.mode == BOND_MODE_8023AD) { + if (old_speed != slave->speed) + bond_3ad_adapter_speed_changed(slave); + if (old_duplex != slave->duplex) + bond_3ad_adapter_duplex_changed(slave); } - break; case NETDEV_DOWN: /* @@ -3604,6 +3511,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd struct ifslave k_sinfo; struct ifslave __user *u_sinfo = NULL; struct mii_ioctl_data *mii = NULL; + struct net *net; int res = 0; pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd); @@ -3670,10 +3578,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd break; } - if (!capable(CAP_NET_ADMIN)) + net = dev_net(bond_dev); + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; - slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave); + slave_dev = dev_get_by_name(net, ifr->ifr_slave); pr_debug("slave_dev=%p:\n", slave_dev); @@ -3692,7 +3602,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd break; case BOND_SETHWADDR_OLD: case SIOCBONDSETHWADDR: - res = bond_sethwaddr(bond_dev, slave_dev); + bond_set_dev_addr(bond_dev, slave_dev); + res = 0; break; case BOND_CHANGE_ACTIVE_OLD: case SIOCBONDCHANGEACTIVE: @@ -4314,11 +4225,12 @@ void bond_set_mode_ops(struct bonding *bond, int mode) } static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, - struct ethtool_drvinfo *drvinfo) + struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, DRV_NAME, 32); - strncpy(drvinfo->version, DRV_VERSION, 32); - snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, 
sizeof(drvinfo->fw_version), "%d", + BOND_ABI_VERSION); } static const struct ethtool_ops bond_ethtool_ops = { @@ -4352,6 +4264,10 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_fix_features = bond_fix_features, }; +static const struct device_type bond_type = { + .name = "bond", +}; + static void bond_destructor(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); @@ -4382,6 +4298,8 @@ static void bond_setup(struct net_device *bond_dev) bond_dev->destructor = bond_destructor; + SET_NETDEV_DEVTYPE(bond_dev, &bond_type); + /* Initialize the device options */ bond_dev->tx_queue_len = 0; bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; @@ -4427,7 +4345,9 @@ static void bond_uninit(struct net_device *bond_dev) bond_netpoll_cleanup(bond_dev); /* Release the bonded slaves */ - bond_release_all(bond_dev); + while (bond->first_slave != NULL) + __bond_release_one(bond_dev, bond->first_slave->dev, true); + pr_info("%s: released all slaves\n", bond_dev->name); list_del(&bond->bond_list); @@ -4841,6 +4761,13 @@ static int bond_init(struct net_device *bond_dev) bond_debug_register(bond); + /* Ensure valid dev_addr */ + if (is_zero_ether_addr(bond_dev->dev_addr) && + bond_dev->addr_assign_type == NET_ADDR_PERM) { + eth_hw_addr_random(bond_dev); + bond->dev_addr_from_first = true; + } + __hw_addr_init(&bond->mc_list); return 0; } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 21b68e5c14fd..2baec24388b1 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -248,6 +248,7 @@ struct bonding { /* debugging support via debugfs */ struct dentry *debug_dir; #endif /* CONFIG_DEBUG_FS */ + bool dev_addr_from_first; }; static inline bool bond_vlan_used(struct bonding *bond) @@ -258,6 +259,9 @@ static inline bool bond_vlan_used(struct bonding *bond) #define bond_slave_get_rcu(dev) \ ((struct slave *) rcu_dereference(dev->rx_handler_data)) +#define bond_slave_get_rtnl(dev) \ + ((struct slave *) rtnl_dereference(dev->rx_handler_data)) + /** * Returns NULL if the net_device does not belong to any of the bond's slaves * @@ -280,11 +284,9 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) { - if (!slave || !slave->dev->master) { + if (!slave || !slave->bond) return NULL; - } - - return netdev_priv(slave->dev->master); + return slave->bond; } static inline bool bond_is_lb(const struct bonding *bond) @@ -360,10 +362,9 @@ static inline void bond_netpoll_send_skb(const struct slave *slave, static inline void bond_set_slave_inactive_flags(struct slave *slave) { - struct bonding *bond = netdev_priv(slave->dev->master); - if (!bond_is_lb(bond)) + if (!bond_is_lb(slave->bond)) bond_set_backup_slave(slave); - if (!bond->params.all_slaves_active) + if (!slave->bond->params.all_slaves_active) slave->inactive = 1; } diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c index bc497d718858..bce8bac311c9 100644 --- a/drivers/net/caif/caif_shmcore.c +++ b/drivers/net/caif/caif_shmcore.c @@ -633,9 +633,6 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) kmalloc(sizeof(struct buf_list), GFP_KERNEL); if (tx_buf == NULL) { - pr_warn("ERROR, Could not" - " allocate dynamic mem. 
for tx_buf," - " Bailing out ...\n"); free_netdev(pshm_dev->pshm_netdev); return -ENOMEM; } @@ -662,9 +659,6 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) kmalloc(sizeof(struct buf_list), GFP_KERNEL); if (rx_buf == NULL) { - pr_warn("ERROR, Could not" - " allocate dynamic mem.for rx_buf," - " Bailing out ...\n"); free_netdev(pshm_dev->pshm_netdev); return -ENOMEM; } diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index b56bd9e80957..1cca19f1c490 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -1,9 +1,7 @@ menu "CAN Device Drivers" - depends on CAN config CAN_VCAN tristate "Virtual Local CAN Interface (vcan)" - depends on CAN ---help--- Similar to the network loopback devices, vcan offers a virtual local CAN interface. @@ -13,7 +11,6 @@ config CAN_VCAN config CAN_SLCAN tristate "Serial / USB serial CAN Adaptors (slcan)" - depends on CAN ---help--- CAN driver for several 'low cost' CAN interfaces that are attached via serial lines or via USB-to-serial adapters using the LAWICEL @@ -33,16 +30,16 @@ config CAN_SLCAN config CAN_DEV tristate "Platform CAN drivers with Netlink support" - depends on CAN default y ---help--- Enables the common framework for platform CAN drivers with Netlink support. This is the standard library for CAN drivers. If unsure, say Y. +if CAN_DEV + config CAN_CALC_BITTIMING bool "CAN bit-timing calculation" - depends on CAN_DEV default y ---help--- If enabled, CAN bit-timing parameters will be calculated for the @@ -54,15 +51,26 @@ config CAN_CALC_BITTIMING arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw". If unsure, say Y. +config CAN_LEDS + bool "Enable LED triggers for Netlink based drivers" + depends on LEDS_CLASS + select LEDS_TRIGGERS + ---help--- + This option adds two LED triggers for packet receive and transmit + events on each supported CAN device. + + Say Y here if you are working on a system with led-class supported + LEDs and you want to use them as canbus activity indicators. + config CAN_AT91 tristate "Atmel AT91 onchip CAN controller" - depends on CAN_DEV && (ARCH_AT91SAM9263 || ARCH_AT91SAM9X5) + depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9X5 ---help--- This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 and AT91SAM9X5 processors. config CAN_TI_HECC - depends on CAN_DEV && ARCH_OMAP3 + depends on ARCH_OMAP3 tristate "TI High End CAN Controller" ---help--- Driver for TI HECC (High End CAN Controller) module found on many @@ -70,12 +78,12 @@ config CAN_TI_HECC config CAN_MCP251X tristate "Microchip MCP251x SPI CAN controllers" - depends on CAN_DEV && SPI && HAS_DMA + depends on SPI && HAS_DMA ---help--- Driver for the Microchip MCP251x SPI CAN controllers. config CAN_BFIN - depends on CAN_DEV && (BF534 || BF536 || BF537 || BF538 || BF539 || BF54x) + depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x tristate "Analog Devices Blackfin on-chip CAN" ---help--- Driver for the Analog Devices Blackfin on-chip CAN controllers @@ -85,7 +93,7 @@ config CAN_BFIN config CAN_JANZ_ICAN3 tristate "Janz VMOD-ICAN3 Intelligent CAN controller" - depends on CAN_DEV && MFD_JANZ_CMODIO + depends on MFD_JANZ_CMODIO ---help--- Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which connects to a MODULbus carrier board. @@ -98,13 +106,13 @@ config HAVE_CAN_FLEXCAN config CAN_FLEXCAN tristate "Support for Freescale FLEXCAN based chips" - depends on CAN_DEV && HAVE_CAN_FLEXCAN + depends on HAVE_CAN_FLEXCAN ---help--- Say Y here if you want to support for Freescale FlexCAN. 
config PCH_CAN tristate "Intel EG20T PCH CAN controller" - depends on CAN_DEV && PCI + depends on PCI ---help--- This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which is an IOH for x86 embedded processor (Intel Atom E6xx series). @@ -112,7 +120,7 @@ config PCH_CAN config CAN_GRCAN tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" - depends on CAN_DEV && OF + depends on OF ---help--- Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. Note that the driver supports little endian, even though little @@ -131,9 +139,10 @@ source "drivers/net/can/usb/Kconfig" source "drivers/net/can/softing/Kconfig" +endif + config CAN_DEBUG_DEVICES bool "CAN devices debugging messages" - depends on CAN ---help--- Say Y here if you want the CAN device drivers to produce a bunch of debug messages to the system log. Select this if you are having diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 7de59862bbe9..c7440392adbb 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -8,6 +8,8 @@ obj-$(CONFIG_CAN_SLCAN) += slcan.o obj-$(CONFIG_CAN_DEV) += can-dev.o can-dev-y := dev.o +can-dev-$(CONFIG_CAN_LEDS) += led.o + obj-y += usb/ obj-y += softing/ diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 81baefda037b..44f363792b59 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -37,6 +37,7 @@ #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/led.h> #define AT91_MB_MASK(i) ((1 << (i)) - 1) @@ -641,6 +642,8 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + + can_led_event(dev, CAN_LED_EVENT_RX); } /** @@ -875,6 +878,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! 
*/ can_get_echo_skb(dev, mb - get_mb_tx_first(priv)); dev->stats.tx_packets++; + can_led_event(dev, CAN_LED_EVENT_TX); } } @@ -1128,6 +1132,8 @@ static int at91_open(struct net_device *dev) goto out_close; } + can_led_event(dev, CAN_LED_EVENT_OPEN); + /* start chip and queuing */ at91_chip_start(dev); napi_enable(&priv->napi); @@ -1159,6 +1165,8 @@ static int at91_close(struct net_device *dev) close_candev(dev); + can_led_event(dev, CAN_LED_EVENT_STOP); + return 0; } @@ -1321,6 +1329,8 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_free; } + devm_can_led_init(dev); + dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->reg_base, dev->irq); diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig index 3b83bafcd947..61ffc12d8fd8 100644 --- a/drivers/net/can/c_can/Kconfig +++ b/drivers/net/can/c_can/Kconfig @@ -1,6 +1,6 @@ menuconfig CAN_C_CAN tristate "Bosch C_CAN/D_CAN devices" - depends on CAN_DEV && HAS_IOMEM + depends on HAS_IOMEM if CAN_C_CAN diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 2282b1ae9765..a668cd491cb3 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -39,6 +39,7 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/led.h> #include "c_can.h" @@ -477,6 +478,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) stats->rx_packets++; stats->rx_bytes += frame->can_dlc; + can_led_event(dev, CAN_LED_EVENT_RX); + return 0; } @@ -755,6 +758,7 @@ static void c_can_do_tx(struct net_device *dev) C_CAN_IFACE(MSGCTRL_REG, 0)) & IF_MCONT_DLC_MASK; stats->tx_packets++; + can_led_event(dev, CAN_LED_EVENT_TX); c_can_inval_msg_object(dev, 0, msg_obj_no); } else { break; @@ -1119,6 +1123,8 @@ static int c_can_open(struct net_device *dev) napi_enable(&priv->napi); + can_led_event(dev, CAN_LED_EVENT_OPEN); + /* start the c_can controller */ c_can_start(dev); @@ -1147,6 +1153,8 @@ static int c_can_close(struct net_device *dev) c_can_reset_ram(priv, false); c_can_pm_runtime_put_sync(priv); + can_led_event(dev, CAN_LED_EVENT_STOP); + return 0; } @@ -1272,6 +1280,8 @@ int register_c_can_dev(struct net_device *dev) err = register_candev(dev); if (err) c_can_pm_runtime_disable(priv); + else + devm_can_led_init(dev); return err; } diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig index 22c07a8c8b43..6a9a5ba79220 100644 --- a/drivers/net/can/cc770/Kconfig +++ b/drivers/net/can/cc770/Kconfig @@ -1,6 +1,6 @@ menuconfig CAN_CC770 tristate "Bosch CC770 and Intel AN82527 devices" - depends on CAN_DEV && HAS_IOMEM + depends on HAS_IOMEM if CAN_CC770 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 8233e5ed2939..f9cba4123c66 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -24,7 +24,9 @@ #include <linux/if_arp.h> #include <linux/can.h> #include <linux/can/dev.h> +#include <linux/can/skb.h> #include <linux/can/netlink.h> +#include <linux/can/led.h> #include <net/rtnetlink.h> #define MOD_DESC "CAN device driver interface" @@ -501,13 +503,18 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) { struct sk_buff *skb; - skb = netdev_alloc_skb(dev, sizeof(struct can_frame)); + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + + sizeof(struct can_frame)); if (unlikely(!skb)) return NULL; skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + + can_skb_reserve(skb); + 
can_skb_prv(skb)->ifindex = dev->ifindex; + *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); memset(*cf, 0, sizeof(struct can_frame)); @@ -794,10 +801,25 @@ void unregister_candev(struct net_device *dev) } EXPORT_SYMBOL_GPL(unregister_candev); +/* + * Test if a network device is a candev based device + * and return the can_priv* if so. + */ +struct can_priv *safe_candev_priv(struct net_device *dev) +{ + if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops)) + return NULL; + + return netdev_priv(dev); +} +EXPORT_SYMBOL_GPL(safe_candev_priv); + static __init int can_dev_init(void) { int err; + can_led_notifier_init(); + err = rtnl_link_register(&can_link_ops); if (!err) printk(KERN_INFO MOD_DESC "\n"); @@ -809,6 +831,8 @@ module_init(can_dev_init); static __exit void can_dev_exit(void) { rtnl_link_unregister(&can_link_ops); + + can_led_notifier_exit(); } module_exit(can_dev_exit); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 0289a6d86f66..769d29ed106d 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -23,6 +23,7 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/led.h> #include <linux/can/platform/flexcan.h> #include <linux/clk.h> #include <linux/delay.h> @@ -564,6 +565,8 @@ static int flexcan_read_frame(struct net_device *dev) stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + can_led_event(dev, CAN_LED_EVENT_RX); + return 1; } @@ -652,6 +655,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) { stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; + can_led_event(dev, CAN_LED_EVENT_TX); flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); netif_wake_queue(dev); } @@ -865,6 +869,9 @@ static int flexcan_open(struct net_device *dev) err = flexcan_chip_start(dev); if (err) goto out_close; + + can_led_event(dev, CAN_LED_EVENT_OPEN); + napi_enable(&priv->napi); netif_start_queue(dev); @@ -893,6 +900,8 @@ static int flexcan_close(struct net_device *dev) close_candev(dev); + can_led_event(dev, CAN_LED_EVENT_STOP); + return 0; } @@ -1092,6 +1101,8 @@ static int flexcan_probe(struct platform_device *pdev) goto failed_register; } + devm_can_led_init(dev); + dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->base, dev->irq); diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c new file mode 100644 index 000000000000..f27fca65dc4a --- /dev/null +++ b/drivers/net/can/led.c @@ -0,0 +1,124 @@ +/* + * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com> + * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
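The full led.c implementation follows below. For context, the consumer side, repeated in the at91_can, c_can, flexcan, mcp251x, sja1000 and ti_hecc hunks of this patch, boils down to the pattern sketched here; the foo_* names are placeholders, not taken from any one driver.

	/* open/close: switch the "-tx"/"-rx" triggers fully on and off */
	static int foo_open(struct net_device *dev)
	{
		/* ... start the controller ... */
		can_led_event(dev, CAN_LED_EVENT_OPEN);
		return 0;
	}

	static int foo_close(struct net_device *dev)
	{
		/* ... stop the controller ... */
		can_led_event(dev, CAN_LED_EVENT_STOP);
		return 0;
	}

	/* per-frame blink events in the RX and TX completion paths */
		can_led_event(dev, CAN_LED_EVENT_RX);
		can_led_event(dev, CAN_LED_EVENT_TX);

	/* in probe, after a successful register_candev() */
		devm_can_led_init(dev);
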
+ */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <linux/can/dev.h> + +#include <linux/can/led.h> + +static unsigned long led_delay = 50; +module_param(led_delay, ulong, 0644); +MODULE_PARM_DESC(led_delay, + "blink delay time for activity leds (msecs, default: 50)."); + +/* Trigger a LED event in response to a CAN device event */ +void can_led_event(struct net_device *netdev, enum can_led_event event) +{ + struct can_priv *priv = netdev_priv(netdev); + + switch (event) { + case CAN_LED_EVENT_OPEN: + led_trigger_event(priv->tx_led_trig, LED_FULL); + led_trigger_event(priv->rx_led_trig, LED_FULL); + break; + case CAN_LED_EVENT_STOP: + led_trigger_event(priv->tx_led_trig, LED_OFF); + led_trigger_event(priv->rx_led_trig, LED_OFF); + break; + case CAN_LED_EVENT_TX: + if (led_delay) + led_trigger_blink_oneshot(priv->tx_led_trig, + &led_delay, &led_delay, 1); + break; + case CAN_LED_EVENT_RX: + if (led_delay) + led_trigger_blink_oneshot(priv->rx_led_trig, + &led_delay, &led_delay, 1); + break; + } +} +EXPORT_SYMBOL_GPL(can_led_event); + +static void can_led_release(struct device *gendev, void *res) +{ + struct can_priv *priv = netdev_priv(to_net_dev(gendev)); + + led_trigger_unregister_simple(priv->tx_led_trig); + led_trigger_unregister_simple(priv->rx_led_trig); +} + +/* Register CAN LED triggers for a CAN device + * + * This is normally called from a driver's probe function + */ +void devm_can_led_init(struct net_device *netdev) +{ + struct can_priv *priv = netdev_priv(netdev); + void *res; + + res = devres_alloc(can_led_release, 0, GFP_KERNEL); + if (!res) { + netdev_err(netdev, "cannot register LED triggers\n"); + return; + } + + snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name), + "%s-tx", netdev->name); + snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name), + "%s-rx", netdev->name); + + led_trigger_register_simple(priv->tx_led_trig_name, + &priv->tx_led_trig); + led_trigger_register_simple(priv->rx_led_trig_name, + &priv->rx_led_trig); + + devres_add(&netdev->dev, res); +} +EXPORT_SYMBOL_GPL(devm_can_led_init); + +/* NETDEV rename notifier to rename the associated led triggers too */ +static int can_led_notifier(struct notifier_block *nb, unsigned long msg, + void *data) +{ + struct net_device *netdev = data; + struct can_priv *priv = safe_candev_priv(netdev); + char name[CAN_LED_NAME_SZ]; + + if (!priv) + return NOTIFY_DONE; + + if (msg == NETDEV_CHANGENAME) { + snprintf(name, sizeof(name), "%s-tx", netdev->name); + led_trigger_rename_static(name, priv->tx_led_trig); + + snprintf(name, sizeof(name), "%s-rx", netdev->name); + led_trigger_rename_static(name, priv->rx_led_trig); + } + + return NOTIFY_DONE; +} + +/* notifier block for netdevice event */ +static struct notifier_block can_netdev_notifier __read_mostly = { + .notifier_call = can_led_notifier, +}; + +int __init can_led_notifier_init(void) +{ + return register_netdevice_notifier(&can_netdev_notifier); +} + +void __exit can_led_notifier_exit(void) +{ + unregister_netdevice_notifier(&can_netdev_notifier); +} diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 5eaf47b8e37b..f32b9fc6a983 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -60,6 +60,7 @@ #include <linux/can/core.h> #include <linux/can/dev.h> +#include <linux/can/led.h> #include <linux/can/platform/mcp251x.h> #include <linux/completion.h> #include <linux/delay.h> @@ -494,6 +495,9 @@ static void 
mcp251x_hw_rx(struct spi_device *spi, int buf_idx) priv->net->stats.rx_packets++; priv->net->stats.rx_bytes += frame->can_dlc; + + can_led_event(priv->net, CAN_LED_EVENT_RX); + netif_rx_ni(skb); } @@ -707,6 +711,8 @@ static int mcp251x_stop(struct net_device *net) mutex_unlock(&priv->mcp_lock); + can_led_event(net, CAN_LED_EVENT_STOP); + return 0; } @@ -905,6 +911,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (intf & CANINTF_TX) { net->stats.tx_packets++; net->stats.tx_bytes += priv->tx_len - 1; + can_led_event(net, CAN_LED_EVENT_TX); if (priv->tx_len) { can_get_echo_skb(net, 0); priv->tx_len = 0; @@ -968,6 +975,9 @@ static int mcp251x_open(struct net_device *net) mcp251x_open_clean(net); goto open_unlock; } + + can_led_event(net, CAN_LED_EVENT_OPEN); + netif_wake_queue(net); open_unlock: @@ -1077,10 +1087,15 @@ static int mcp251x_can_probe(struct spi_device *spi) pdata->transceiver_enable(0); ret = register_candev(net); - if (!ret) { - dev_info(&spi->dev, "probed\n"); - return ret; - } + if (ret) + goto error_probe; + + devm_can_led_init(net); + + dev_info(&spi->dev, "probed\n"); + + return ret; + error_probe: if (!mcp251x_enable_dma) kfree(priv->spi_rx_buf); diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig index d38706958af6..f19be5269e7b 100644 --- a/drivers/net/can/mscan/Kconfig +++ b/drivers/net/can/mscan/Kconfig @@ -1,5 +1,5 @@ config CAN_MSCAN - depends on CAN_DEV && (PPC || M68K) + depends on PPC || M68K tristate "Support for Freescale MSCAN based chips" ---help--- The Motorola Scalable Controller Area Network (MSCAN) definition diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 92f73c708a3d..b39ca5b3ea7f 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -1,6 +1,6 @@ menuconfig CAN_SJA1000 tristate "Philips/NXP SJA1000 devices" - depends on CAN_DEV && HAS_IOMEM + depends on HAS_IOMEM if CAN_SJA1000 @@ -99,11 +99,11 @@ config CAN_TSCAN1 tristate "TS-CAN1 PC104 boards" depends on ISA help - This driver is for Technologic Systems' TSCAN-1 PC104 boards. - http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1 - The driver supports multiple boards and automatically configures them: - PLD IO base addresses are read from jumpers JP1 and JP2, - IRQ numbers are read from jumpers JP4 and JP5, - SJA1000 IO base addresses are chosen heuristically (first that works). + This driver is for Technologic Systems' TSCAN-1 PC104 boards. + http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1 + The driver supports multiple boards and automatically configures them: + PLD IO base addresses are read from jumpers JP1 and JP2, + IRQ numbers are read from jumpers JP4 and JP5, + SJA1000 IO base addresses are chosen heuristically (first that works). endif diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index 036a326836b2..36d298da2af6 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -238,7 +238,6 @@ static int ems_pci_add_card(struct pci_dev *pdev, /* Allocating card structures to hold addresses, ... 
*/ card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL); if (card == NULL) { - dev_err(&pdev->dev, "Unable to allocate memory\n"); pci_disable_device(pdev); return -ENOMEM; } diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 600ac7226e5c..d1e7f1006ddd 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -450,11 +450,8 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) } else { /* create the bit banging I2C adapter structure */ card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL); - if (!card) { - dev_err(&pdev->dev, - "failed allocating memory for i2c chip\n"); + if (!card) return -ENOMEM; - } card->cfg_base = chan->cfg_base; card->reg_base = priv->reg_base; diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index f1175142b0a0..1a7020ba37f5 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -660,7 +660,6 @@ static int pcan_probe(struct pcmcia_device *pdev) card = kzalloc(sizeof(struct pcan_pccard), GFP_KERNEL); if (!card) { - dev_err(&pdev->dev, "couldn't allocate card memory\n"); err = -ENOMEM; goto probe_err_2; } diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 11d1062a9449..a042cdc260dc 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -508,7 +508,6 @@ static int plx_pci_add_card(struct pci_dev *pdev, /* Allocate card structures to hold addresses, ... */ card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) { - dev_err(&pdev->dev, "Unable to allocate memory\n"); pci_disable_device(pdev); return -ENOMEM; } diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 83ee11eca0e2..daf4013a8fc7 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -60,6 +60,7 @@ #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/led.h> #include "sja1000.h" @@ -368,6 +369,8 @@ static void sja1000_rx(struct net_device *dev) stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + + can_led_event(dev, CAN_LED_EVENT_RX); } static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) @@ -521,6 +524,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) can_get_echo_skb(dev, 0); } netif_wake_queue(dev); + can_led_event(dev, CAN_LED_EVENT_TX); } if (isrc & IRQ_RI) { /* receive interrupt */ @@ -575,6 +579,8 @@ static int sja1000_open(struct net_device *dev) /* init and start chi */ sja1000_start(dev); + can_led_event(dev, CAN_LED_EVENT_OPEN); + netif_start_queue(dev); return 0; @@ -592,6 +598,8 @@ static int sja1000_close(struct net_device *dev) close_candev(dev); + can_led_event(dev, CAN_LED_EVENT_STOP); + return 0; } @@ -639,6 +647,8 @@ static const struct net_device_ops sja1000_netdev_ops = { int register_sja1000dev(struct net_device *dev) { + int ret; + if (!sja1000_probe_chip(dev)) return -ENODEV; @@ -648,7 +658,12 @@ int register_sja1000dev(struct net_device *dev) set_reset_mode(dev); chipset_init(dev); - return register_candev(dev); + ret = register_candev(dev); + + if (!ret) + devm_can_led_init(dev); + + return ret; } EXPORT_SYMBOL_GPL(register_sja1000dev); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index adc3708d8829..06b7e097d36e 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -55,6 +55,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/can.h> +#include 
<linux/can/skb.h> static __initconst const char banner[] = KERN_INFO "slcan: serial line CAN interface driver\n"; @@ -184,7 +185,8 @@ static void slc_bump(struct slcan *sl) cf.data[i] |= tmp; } - skb = dev_alloc_skb(sizeof(struct can_frame)); + skb = dev_alloc_skb(sizeof(struct can_frame) + + sizeof(struct can_skb_priv)); if (!skb) return; @@ -192,6 +194,10 @@ static void slc_bump(struct slcan *sl) skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = sl->dev->ifindex; + memcpy(skb_put(skb, sizeof(struct can_frame)), &cf, sizeof(struct can_frame)); netif_rx_ni(skb); diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 5de46a9a77bb..96b6fe158b5b 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig @@ -1,6 +1,6 @@ config CAN_SOFTING tristate "Softing Gmbh CAN generic support" - depends on CAN_DEV && HAS_IOMEM + depends on HAS_IOMEM ---help--- Support for CAN cards from Softing Gmbh & some cards from Vector Gmbh. diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 300581b24ff3..f21fc37ec578 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -50,6 +50,7 @@ #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/led.h> #include <linux/can/platform/ti_hecc.h> #define DRV_NAME "ti_hecc" @@ -593,6 +594,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno) spin_unlock_irqrestore(&priv->mbx_lock, flags); stats->rx_bytes += cf->can_dlc; + can_led_event(priv->ndev, CAN_LED_EVENT_RX); netif_receive_skb(skb); stats->rx_packets++; @@ -796,6 +798,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) stats->tx_bytes += hecc_read_mbx(priv, mbxno, HECC_CANMCF) & 0xF; stats->tx_packets++; + can_led_event(ndev, CAN_LED_EVENT_TX); can_get_echo_skb(ndev, mbxno); --priv->tx_tail; } @@ -851,6 +854,8 @@ static int ti_hecc_open(struct net_device *ndev) return err; } + can_led_event(ndev, CAN_LED_EVENT_OPEN); + ti_hecc_start(ndev); napi_enable(&priv->napi); netif_start_queue(ndev); @@ -869,6 +874,8 @@ static int ti_hecc_close(struct net_device *ndev) close_candev(ndev); ti_hecc_transceiver_switch(priv, 0); + can_led_event(ndev, CAN_LED_EVENT_STOP); + return 0; } @@ -961,6 +968,9 @@ static int ti_hecc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "register_candev() failed\n"); goto probe_exit_clk; } + + devm_can_led_init(ndev); + dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", priv->base, (u32) ndev->irq); diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index a4e4bee35710..fc96a3d83ebe 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -1,5 +1,5 @@ menu "CAN USB interfaces" - depends on USB && CAN_DEV + depends on USB config CAN_EMS_USB tristate "EMS CPC-USB/ARM7 CAN/USB interface" @@ -48,4 +48,10 @@ config CAN_PEAK_USB This driver supports the PCAN-USB and PCAN-USB Pro adapters from PEAK-System Technik (http://www.peak-system.com). +config CAN_8DEV_USB + tristate "8 devices USB2CAN interface" + ---help--- + This driver supports the USB2CAN interface + from 8 devices (http://www.8devices.com). 
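Before the new usb_8dev driver below, note the skb change that appears twice above (alloc_can_skb() in dev.c and slc_bump() in slcan.c): CAN skbs now reserve headroom for a struct can_skb_priv and record the receiving interface's ifindex there. Reassembled from the dev.c hunk, the allocation path reads:

	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
			       sizeof(struct can_frame));
	if (unlikely(!skb))
		return NULL;

	skb->protocol = htons(ETH_P_CAN);
	skb->pkt_type = PACKET_BROADCAST;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;

	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
	memset(*cf, 0, sizeof(struct can_frame));
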
+ endmenu diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 80a2ee41fd61..becef460a91a 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -6,5 +6,6 @@ obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ +obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index c69f0b72b352..5f9a7ad9b964 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -1014,17 +1014,13 @@ static int ems_usb_probe(struct usb_interface *intf, } dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); - if (!dev->intr_in_buffer) { - dev_err(&intf->dev, "Couldn't alloc Intr buffer\n"); + if (!dev->intr_in_buffer) goto cleanup_intr_urb; - } dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + sizeof(struct ems_cpc_msg), GFP_KERNEL); - if (!dev->tx_msg_buffer) { - dev_err(&intf->dev, "Couldn't alloc Tx buffer\n"); + if (!dev->tx_msg_buffer) goto cleanup_intr_in_buffer; - } usb_set_intfdata(intf, dev); diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 5b58a4d87397..45cb9f3c1324 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -561,7 +561,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { - netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); return -ENOMEM; } @@ -1268,7 +1267,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { - netdev_err(netdev, "No memory left for USB buffer\n"); stats->tx_dropped++; goto nobufmem; } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index d9290ea788e0..a0f647f92bf5 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -386,7 +386,6 @@ static int peak_usb_start(struct peak_usb_device *dev) buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL); if (!buf) { - netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; @@ -442,7 +441,6 @@ static int peak_usb_start(struct peak_usb_device *dev) buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL); if (!buf) { - netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; @@ -634,7 +632,6 @@ static int peak_usb_restart(struct peak_usb_device *dev) /* also allocate enough space for the commands to send */ buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC); if (!buf) { - netdev_err(dev->netdev, "no memory left for async cmd\n"); usb_free_urb(urb); return -ENOMEM; } @@ -729,8 +726,6 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, /* allocate a buffer large enough to send commands */ dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { - dev_err(&intf->dev, "%s: couldn't alloc cmd buffer\n", - PCAN_USB_DRIVER_NAME); err = -ENOMEM; goto lbl_set_intf_data; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c new file mode 100644 index 000000000000..6e15ef08f301 --- /dev/null +++ b/drivers/net/can/usb/usb_8dev.c @@ -0,0 +1,1031 @@ +/* + * CAN driver for "8 devices" USB2CAN converter + * + * Copyright (C) 2012 Bernd 
Krumboeck (krumboeck@universalnet.at) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published + * by the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. + * + * This driver is inspired by the 3.2.0 version of drivers/net/can/usb/ems_usb.c + * and drivers/net/can/usb/esd_usb2.c + * + * Many thanks to Gerhard Bertelsmann (info@gerhard-bertelsmann.de) + * for testing and fixing this driver. Also many thanks to "8 devices", + * who were very cooperative and answered my questions. + */ + +#include <linux/init.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/usb.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/led.h> + +/* driver constants */ +#define MAX_RX_URBS 20 +#define MAX_TX_URBS 20 +#define RX_BUFFER_SIZE 64 + +/* vendor and product id */ +#define USB_8DEV_VENDOR_ID 0x0483 +#define USB_8DEV_PRODUCT_ID 0x1234 + +/* endpoints */ +enum usb_8dev_endpoint { + USB_8DEV_ENDP_DATA_RX = 1, + USB_8DEV_ENDP_DATA_TX, + USB_8DEV_ENDP_CMD_RX, + USB_8DEV_ENDP_CMD_TX +}; + +/* device CAN clock */ +#define USB_8DEV_ABP_CLOCK 32000000 + +/* setup flags */ +#define USB_8DEV_SILENT 0x01 +#define USB_8DEV_LOOPBACK 0x02 +#define USB_8DEV_DISABLE_AUTO_RESTRANS 0x04 +#define USB_8DEV_STATUS_FRAME 0x08 + +/* commands */ +enum usb_8dev_cmd { + USB_8DEV_RESET = 1, + USB_8DEV_OPEN, + USB_8DEV_CLOSE, + USB_8DEV_SET_SPEED, + USB_8DEV_SET_MASK_FILTER, + USB_8DEV_GET_STATUS, + USB_8DEV_GET_STATISTICS, + USB_8DEV_GET_SERIAL, + USB_8DEV_GET_SOFTW_VER, + USB_8DEV_GET_HARDW_VER, + USB_8DEV_RESET_TIMESTAMP, + USB_8DEV_GET_SOFTW_HARDW_VER +}; + +/* command options */ +#define USB_8DEV_BAUD_MANUAL 0x09 +#define USB_8DEV_CMD_START 0x11 +#define USB_8DEV_CMD_END 0x22 + +#define USB_8DEV_CMD_SUCCESS 0 +#define USB_8DEV_CMD_ERROR 255 + +#define USB_8DEV_CMD_TIMEOUT 1000 + +/* frames */ +#define USB_8DEV_DATA_START 0x55 +#define USB_8DEV_DATA_END 0xAA + +#define USB_8DEV_TYPE_CAN_FRAME 0 +#define USB_8DEV_TYPE_ERROR_FRAME 3 + +#define USB_8DEV_EXTID 0x01 +#define USB_8DEV_RTR 0x02 +#define USB_8DEV_ERR_FLAG 0x04 + +/* status */ +#define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */ +#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occured when sending */ +#define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */ +#define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error count. 
has reached 128 */ +#define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */ +#define USB_8DEV_STATUSMSG_STUFF 0x20 /* Stuff Error */ +#define USB_8DEV_STATUSMSG_FORM 0x21 /* Form Error */ +#define USB_8DEV_STATUSMSG_ACK 0x23 /* Ack Error */ +#define USB_8DEV_STATUSMSG_BIT0 0x24 /* Bit1 Error */ +#define USB_8DEV_STATUSMSG_BIT1 0x25 /* Bit0 Error */ +#define USB_8DEV_STATUSMSG_CRC 0x27 /* CRC Error */ + +#define USB_8DEV_RP_MASK 0x7F /* Mask for Receive Error Bit */ + + +/* table of devices that work with this driver */ +static const struct usb_device_id usb_8dev_table[] = { + { USB_DEVICE(USB_8DEV_VENDOR_ID, USB_8DEV_PRODUCT_ID) }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, usb_8dev_table); + +struct usb_8dev_tx_urb_context { + struct usb_8dev_priv *priv; + + u32 echo_index; + u8 dlc; +}; + +/* Structure to hold all of our device specific stuff */ +struct usb_8dev_priv { + struct can_priv can; /* must be the first member */ + + struct sk_buff *echo_skb[MAX_TX_URBS]; + + struct usb_device *udev; + struct net_device *netdev; + + atomic_t active_tx_urbs; + struct usb_anchor tx_submitted; + struct usb_8dev_tx_urb_context tx_contexts[MAX_TX_URBS]; + + struct usb_anchor rx_submitted; + + struct can_berr_counter bec; + + u8 *cmd_msg_buffer; + + struct mutex usb_8dev_cmd_lock; + +}; + +/* tx frame */ +struct __packed usb_8dev_tx_msg { + u8 begin; + u8 flags; /* RTR and EXT_ID flag */ + __be32 id; /* upper 3 bits not used */ + u8 dlc; /* data length code 0-8 bytes */ + u8 data[8]; /* 64-bit data */ + u8 end; +}; + +/* rx frame */ +struct __packed usb_8dev_rx_msg { + u8 begin; + u8 type; /* frame type */ + u8 flags; /* RTR and EXT_ID flag */ + __be32 id; /* upper 3 bits not used */ + u8 dlc; /* data length code 0-8 bytes */ + u8 data[8]; /* 64-bit data */ + __be32 timestamp; /* 32-bit timestamp */ + u8 end; +}; + +/* command frame */ +struct __packed usb_8dev_cmd_msg { + u8 begin; + u8 channel; /* unkown - always 0 */ + u8 command; /* command to execute */ + u8 opt1; /* optional parameter / return value */ + u8 opt2; /* optional parameter 2 */ + u8 data[10]; /* optional parameter and data */ + u8 end; +}; + +static int usb_8dev_send_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size) +{ + int actual_length; + + return usb_bulk_msg(priv->udev, + usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_TX), + msg, size, &actual_length, USB_8DEV_CMD_TIMEOUT); +} + +static int usb_8dev_wait_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size, + int *actual_length) +{ + return usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_RX), + msg, size, actual_length, USB_8DEV_CMD_TIMEOUT); +} + +/* Send command to device and receive result. + * Command was successful when opt1 = 0. 
+ */ +static int usb_8dev_send_cmd(struct usb_8dev_priv *priv, + struct usb_8dev_cmd_msg *out, + struct usb_8dev_cmd_msg *in) +{ + int err; + int num_bytes_read; + struct net_device *netdev; + + netdev = priv->netdev; + + out->begin = USB_8DEV_CMD_START; + out->end = USB_8DEV_CMD_END; + + mutex_lock(&priv->usb_8dev_cmd_lock); + + memcpy(priv->cmd_msg_buffer, out, + sizeof(struct usb_8dev_cmd_msg)); + + err = usb_8dev_send_cmd_msg(priv, priv->cmd_msg_buffer, + sizeof(struct usb_8dev_cmd_msg)); + if (err < 0) { + netdev_err(netdev, "sending command message failed\n"); + goto failed; + } + + err = usb_8dev_wait_cmd_msg(priv, priv->cmd_msg_buffer, + sizeof(struct usb_8dev_cmd_msg), + &num_bytes_read); + if (err < 0) { + netdev_err(netdev, "no command message answer\n"); + goto failed; + } + + memcpy(in, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg)); + + if (in->begin != USB_8DEV_CMD_START || in->end != USB_8DEV_CMD_END || + num_bytes_read != 16 || in->opt1 != 0) + err = -EPROTO; + +failed: + mutex_unlock(&priv->usb_8dev_cmd_lock); + return err; +} + +/* Send open command to device */ +static int usb_8dev_cmd_open(struct usb_8dev_priv *priv) +{ + struct can_bittiming *bt = &priv->can.bittiming; + struct usb_8dev_cmd_msg outmsg; + struct usb_8dev_cmd_msg inmsg; + u32 ctrlmode = priv->can.ctrlmode; + u32 flags = USB_8DEV_STATUS_FRAME; + __be32 beflags; + __be16 bebrp; + + memset(&outmsg, 0, sizeof(outmsg)); + outmsg.command = USB_8DEV_OPEN; + outmsg.opt1 = USB_8DEV_BAUD_MANUAL; + outmsg.data[0] = bt->prop_seg + bt->phase_seg1; + outmsg.data[1] = bt->phase_seg2; + outmsg.data[2] = bt->sjw; + + /* BRP */ + bebrp = cpu_to_be16((u16)bt->brp); + memcpy(&outmsg.data[3], &bebrp, sizeof(bebrp)); + + /* flags */ + if (ctrlmode & CAN_CTRLMODE_LOOPBACK) + flags |= USB_8DEV_LOOPBACK; + if (ctrlmode & CAN_CTRLMODE_LISTENONLY) + flags |= USB_8DEV_SILENT; + if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) + flags |= USB_8DEV_DISABLE_AUTO_RESTRANS; + + beflags = cpu_to_be32(flags); + memcpy(&outmsg.data[5], &beflags, sizeof(beflags)); + + return usb_8dev_send_cmd(priv, &outmsg, &inmsg); +} + +/* Send close command to device */ +static int usb_8dev_cmd_close(struct usb_8dev_priv *priv) +{ + struct usb_8dev_cmd_msg inmsg; + struct usb_8dev_cmd_msg outmsg = { + .channel = 0, + .command = USB_8DEV_CLOSE, + .opt1 = 0, + .opt2 = 0 + }; + + return usb_8dev_send_cmd(priv, &outmsg, &inmsg); +} + +/* Get firmware and hardware version */ +static int usb_8dev_cmd_version(struct usb_8dev_priv *priv, u32 *res) +{ + struct usb_8dev_cmd_msg inmsg; + struct usb_8dev_cmd_msg outmsg = { + .channel = 0, + .command = USB_8DEV_GET_SOFTW_HARDW_VER, + .opt1 = 0, + .opt2 = 0 + }; + + int err = usb_8dev_send_cmd(priv, &outmsg, &inmsg); + if (err) + return err; + + *res = be32_to_cpup((__be32 *)inmsg.data); + + return err; +} + +/* Set network device mode + * + * Maybe we should leave this function empty, because the device + * set mode variable with open command. 
+ */ +static int usb_8dev_set_mode(struct net_device *netdev, enum can_mode mode) +{ + struct usb_8dev_priv *priv = netdev_priv(netdev); + int err = 0; + + switch (mode) { + case CAN_MODE_START: + err = usb_8dev_cmd_open(priv); + if (err) + netdev_warn(netdev, "couldn't start device"); + break; + + default: + return -EOPNOTSUPP; + } + + return err; +} + +/* Read error/status frames */ +static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, + struct usb_8dev_rx_msg *msg) +{ + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats = &priv->netdev->stats; + + /* Error message: + * byte 0: Status + * byte 1: bit 7: Receive Passive + * byte 1: bit 0-6: Receive Error Counter + * byte 2: Transmit Error Counter + * byte 3: Always 0 (maybe reserved for future use) + */ + + u8 state = msg->data[0]; + u8 rxerr = msg->data[1] & USB_8DEV_RP_MASK; + u8 txerr = msg->data[2]; + int rx_errors = 0; + int tx_errors = 0; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (!skb) + return; + + switch (state) { + case USB_8DEV_STATUSMSG_OK: + priv->can.state = CAN_STATE_ERROR_ACTIVE; + cf->can_id |= CAN_ERR_PROT; + cf->data[2] = CAN_ERR_PROT_ACTIVE; + break; + case USB_8DEV_STATUSMSG_BUSOFF: + priv->can.state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + can_bus_off(priv->netdev); + break; + case USB_8DEV_STATUSMSG_OVERRUN: + case USB_8DEV_STATUSMSG_BUSLIGHT: + case USB_8DEV_STATUSMSG_BUSHEAVY: + cf->can_id |= CAN_ERR_CRTL; + break; + default: + priv->can.state = CAN_STATE_ERROR_WARNING; + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + priv->can.can_stats.bus_error++; + break; + } + + switch (state) { + case USB_8DEV_STATUSMSG_OK: + case USB_8DEV_STATUSMSG_BUSOFF: + break; + case USB_8DEV_STATUSMSG_ACK: + cf->can_id |= CAN_ERR_ACK; + tx_errors = 1; + break; + case USB_8DEV_STATUSMSG_CRC: + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | + CAN_ERR_PROT_LOC_CRC_DEL; + rx_errors = 1; + break; + case USB_8DEV_STATUSMSG_BIT0: + cf->data[2] |= CAN_ERR_PROT_BIT0; + tx_errors = 1; + break; + case USB_8DEV_STATUSMSG_BIT1: + cf->data[2] |= CAN_ERR_PROT_BIT1; + tx_errors = 1; + break; + case USB_8DEV_STATUSMSG_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + rx_errors = 1; + break; + case USB_8DEV_STATUSMSG_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + rx_errors = 1; + break; + case USB_8DEV_STATUSMSG_OVERRUN: + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_over_errors++; + rx_errors = 1; + break; + case USB_8DEV_STATUSMSG_BUSLIGHT: + priv->can.state = CAN_STATE_ERROR_WARNING; + cf->data[1] = (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + priv->can.can_stats.error_warning++; + break; + case USB_8DEV_STATUSMSG_BUSHEAVY: + priv->can.state = CAN_STATE_ERROR_PASSIVE; + cf->data[1] = (txerr > rxerr) ? 
+ CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + priv->can.can_stats.error_passive++; + break; + default: + netdev_warn(priv->netdev, + "Unknown status/error message (%d)\n", state); + break; + } + + if (tx_errors) { + cf->data[2] |= CAN_ERR_PROT_TX; + stats->tx_errors++; + } + + if (rx_errors) + stats->rx_errors++; + + cf->data[6] = txerr; + cf->data[7] = rxerr; + + priv->bec.txerr = txerr; + priv->bec.rxerr = rxerr; + + netif_rx(skb); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; +} + +/* Read data and status frames */ +static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, + struct usb_8dev_rx_msg *msg) +{ + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats = &priv->netdev->stats; + + if (msg->type == USB_8DEV_TYPE_ERROR_FRAME && + msg->flags == USB_8DEV_ERR_FLAG) { + usb_8dev_rx_err_msg(priv, msg); + } else if (msg->type == USB_8DEV_TYPE_CAN_FRAME) { + skb = alloc_can_skb(priv->netdev, &cf); + if (!skb) + return; + + cf->can_id = be32_to_cpu(msg->id); + cf->can_dlc = get_can_dlc(msg->dlc & 0xF); + + if (msg->flags & USB_8DEV_EXTID) + cf->can_id |= CAN_EFF_FLAG; + + if (msg->flags & USB_8DEV_RTR) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, msg->data, cf->can_dlc); + + netif_rx(skb); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + can_led_event(priv->netdev, CAN_LED_EVENT_RX); + } else { + netdev_warn(priv->netdev, "frame type %d unknown", + msg->type); + } + +} + +/* Callback for reading data from device + * + * Check urb status, call read function and resubmit urb read operation. + */ +static void usb_8dev_read_bulk_callback(struct urb *urb) +{ + struct usb_8dev_priv *priv = urb->context; + struct net_device *netdev; + int retval; + int pos = 0; + + netdev = priv->netdev; + + if (!netif_device_present(netdev)) + return; + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -ESHUTDOWN: + return; + + default: + netdev_info(netdev, "Rx URB aborted (%d)\n", + urb->status); + goto resubmit_urb; + } + + while (pos < urb->actual_length) { + struct usb_8dev_rx_msg *msg; + + if (pos + sizeof(struct usb_8dev_rx_msg) > urb->actual_length) { + netdev_err(priv->netdev, "format error\n"); + break; + } + + msg = (struct usb_8dev_rx_msg *)(urb->transfer_buffer + pos); + usb_8dev_rx_can_msg(priv, msg); + + pos += sizeof(struct usb_8dev_rx_msg); + } + +resubmit_urb: + usb_fill_bulk_urb(urb, priv->udev, + usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), + urb->transfer_buffer, RX_BUFFER_SIZE, + usb_8dev_read_bulk_callback, priv); + + retval = usb_submit_urb(urb, GFP_ATOMIC); + + if (retval == -ENODEV) + netif_device_detach(netdev); + else if (retval) + netdev_err(netdev, + "failed resubmitting read bulk urb: %d\n", retval); +} + +/* Callback handler for write operations + * + * Free allocated buffers, check transmit status and + * calculate statistic. 
+ */ +static void usb_8dev_write_bulk_callback(struct urb *urb) +{ + struct usb_8dev_tx_urb_context *context = urb->context; + struct usb_8dev_priv *priv; + struct net_device *netdev; + + BUG_ON(!context); + + priv = context->priv; + netdev = priv->netdev; + + /* free up our allocated buffer */ + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + + atomic_dec(&priv->active_tx_urbs); + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "Tx URB aborted (%d)\n", + urb->status); + + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += context->dlc; + + can_get_echo_skb(netdev, context->echo_index); + + can_led_event(netdev, CAN_LED_EVENT_TX); + + /* Release context */ + context->echo_index = MAX_TX_URBS; + + netif_wake_queue(netdev); +} + +/* Send data to device */ +static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct usb_8dev_priv *priv = netdev_priv(netdev); + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf = (struct can_frame *) skb->data; + struct usb_8dev_tx_msg *msg; + struct urb *urb; + struct usb_8dev_tx_urb_context *context = NULL; + u8 *buf; + int i, err; + size_t size = sizeof(struct usb_8dev_tx_msg); + + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + /* create a URB, and a buffer for it, and copy the data to the URB */ + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + netdev_err(netdev, "No memory left for URBs\n"); + goto nomem; + } + + buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC, + &urb->transfer_dma); + if (!buf) { + netdev_err(netdev, "No memory left for USB buffer\n"); + goto nomembuf; + } + + memset(buf, 0, size); + + msg = (struct usb_8dev_tx_msg *)buf; + msg->begin = USB_8DEV_DATA_START; + msg->flags = 0x00; + + if (cf->can_id & CAN_RTR_FLAG) + msg->flags |= USB_8DEV_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + msg->flags |= USB_8DEV_EXTID; + + msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK); + msg->dlc = cf->can_dlc; + memcpy(msg->data, cf->data, cf->can_dlc); + msg->end = USB_8DEV_DATA_END; + + for (i = 0; i < MAX_TX_URBS; i++) { + if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { + context = &priv->tx_contexts[i]; + break; + } + } + + /* May never happen! When this happens we'd more URBs in flight as + * allowed (MAX_TX_URBS). + */ + if (!context) + goto nofreecontext; + + context->priv = priv; + context->echo_index = i; + context->dlc = cf->can_dlc; + + usb_fill_bulk_urb(urb, priv->udev, + usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX), + buf, size, usb_8dev_write_bulk_callback, context); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + usb_anchor_urb(urb, &priv->tx_submitted); + + can_put_echo_skb(skb, netdev, context->echo_index); + + atomic_inc(&priv->active_tx_urbs); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) + goto failed; + else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) + /* Slow down tx path */ + netif_stop_queue(netdev); + + /* Release our reference to this URB, the USB core will eventually free + * it entirely. 
+ */ + usb_free_urb(urb); + + return NETDEV_TX_OK; + +nofreecontext: + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); + + netdev_warn(netdev, "couldn't find free context"); + + return NETDEV_TX_BUSY; + +failed: + can_free_echo_skb(netdev, context->echo_index); + + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); + + atomic_dec(&priv->active_tx_urbs); + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "failed tx_urb %d\n", err); + +nomembuf: + usb_free_urb(urb); + +nomem: + dev_kfree_skb(skb); + stats->tx_dropped++; + + return NETDEV_TX_OK; +} + +static int usb_8dev_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct usb_8dev_priv *priv = netdev_priv(netdev); + + bec->txerr = priv->bec.txerr; + bec->rxerr = priv->bec.rxerr; + + return 0; +} + +/* Start USB device */ +static int usb_8dev_start(struct usb_8dev_priv *priv) +{ + struct net_device *netdev = priv->netdev; + int err, i; + + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf; + + /* create a URB, and a buffer for it */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + netdev_err(netdev, "No memory left for URBs\n"); + err = -ENOMEM; + break; + } + + buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, + &urb->transfer_dma); + if (!buf) { + netdev_err(netdev, "No memory left for USB buffer\n"); + usb_free_urb(urb); + err = -ENOMEM; + break; + } + + usb_fill_bulk_urb(urb, priv->udev, + usb_rcvbulkpipe(priv->udev, + USB_8DEV_ENDP_DATA_RX), + buf, RX_BUFFER_SIZE, + usb_8dev_read_bulk_callback, priv); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + usb_anchor_urb(urb, &priv->rx_submitted); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf, + urb->transfer_dma); + break; + } + + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(urb); + } + + /* Did we submit any URBs */ + if (i == 0) { + netdev_warn(netdev, "couldn't setup read URBs\n"); + return err; + } + + /* Warn if we've couldn't transmit all the URBs */ + if (i < MAX_RX_URBS) + netdev_warn(netdev, "rx performance may be slow\n"); + + err = usb_8dev_cmd_open(priv); + if (err) + goto failed; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + return 0; + +failed: + if (err == -ENODEV) + netif_device_detach(priv->netdev); + + netdev_warn(netdev, "couldn't submit control: %d\n", err); + + return err; +} + +/* Open USB device */ +static int usb_8dev_open(struct net_device *netdev) +{ + struct usb_8dev_priv *priv = netdev_priv(netdev); + int err; + + /* common open */ + err = open_candev(netdev); + if (err) + return err; + + can_led_event(netdev, CAN_LED_EVENT_OPEN); + + /* finally start device */ + err = usb_8dev_start(priv); + if (err) { + if (err == -ENODEV) + netif_device_detach(priv->netdev); + + netdev_warn(netdev, "couldn't start device: %d\n", + err); + + close_candev(netdev); + + return err; + } + + netif_start_queue(netdev); + + return 0; +} + +static void unlink_all_urbs(struct usb_8dev_priv *priv) +{ + int i; + + usb_kill_anchored_urbs(&priv->rx_submitted); + + usb_kill_anchored_urbs(&priv->tx_submitted); + atomic_set(&priv->active_tx_urbs, 0); + + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; +} + +/* Close USB device */ +static int usb_8dev_close(struct net_device *netdev) +{ + struct usb_8dev_priv *priv = netdev_priv(netdev); + int err = 0; + + 
/* Send CLOSE command to CAN controller */ + err = usb_8dev_cmd_close(priv); + if (err) + netdev_warn(netdev, "couldn't stop device"); + + priv->can.state = CAN_STATE_STOPPED; + + netif_stop_queue(netdev); + + /* Stop polling */ + unlink_all_urbs(priv); + + close_candev(netdev); + + can_led_event(netdev, CAN_LED_EVENT_STOP); + + return err; +} + +static const struct net_device_ops usb_8dev_netdev_ops = { + .ndo_open = usb_8dev_open, + .ndo_stop = usb_8dev_close, + .ndo_start_xmit = usb_8dev_start_xmit, +}; + +static const struct can_bittiming_const usb_8dev_bittiming_const = { + .name = "usb_8dev", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +/* Probe USB device + * + * Check device and firmware. + * Set supported modes and bittiming constants. + * Allocate some memory. + */ +static int usb_8dev_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct net_device *netdev; + struct usb_8dev_priv *priv; + int i, err = -ENOMEM; + u32 version; + char buf[18]; + struct usb_device *usbdev = interface_to_usbdev(intf); + + /* product id looks strange, better we also check iProduct string */ + if (usb_string(usbdev, usbdev->descriptor.iProduct, buf, + sizeof(buf)) > 0 && strcmp(buf, "USB2CAN converter")) { + dev_info(&usbdev->dev, "ignoring: not an USB2CAN converter\n"); + return -ENODEV; + } + + netdev = alloc_candev(sizeof(struct usb_8dev_priv), MAX_TX_URBS); + if (!netdev) { + dev_err(&intf->dev, "Couldn't alloc candev\n"); + return -ENOMEM; + } + + priv = netdev_priv(netdev); + + priv->udev = usbdev; + priv->netdev = netdev; + + priv->can.state = CAN_STATE_STOPPED; + priv->can.clock.freq = USB_8DEV_ABP_CLOCK; + priv->can.bittiming_const = &usb_8dev_bittiming_const; + priv->can.do_set_mode = usb_8dev_set_mode; + priv->can.do_get_berr_counter = usb_8dev_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT; + + netdev->netdev_ops = &usb_8dev_netdev_ops; + + netdev->flags |= IFF_ECHO; /* we support local echo */ + + init_usb_anchor(&priv->rx_submitted); + + init_usb_anchor(&priv->tx_submitted); + atomic_set(&priv->active_tx_urbs, 0); + + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; + + priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), + GFP_KERNEL); + if (!priv->cmd_msg_buffer) + goto cleanup_candev; + + usb_set_intfdata(intf, priv); + + SET_NETDEV_DEV(netdev, &intf->dev); + + mutex_init(&priv->usb_8dev_cmd_lock); + + err = register_candev(netdev); + if (err) { + netdev_err(netdev, + "couldn't register CAN device: %d\n", err); + goto cleanup_cmd_msg_buffer; + } + + err = usb_8dev_cmd_version(priv, &version); + if (err) { + netdev_err(netdev, "can't get firmware version\n"); + goto cleanup_cmd_msg_buffer; + } else { + netdev_info(netdev, + "firmware: %d.%d, hardware: %d.%d\n", + (version>>24) & 0xff, (version>>16) & 0xff, + (version>>8) & 0xff, version & 0xff); + } + + devm_can_led_init(netdev); + + return 0; + +cleanup_cmd_msg_buffer: + kfree(priv->cmd_msg_buffer); + +cleanup_candev: + free_candev(netdev); + + return err; + +} + +/* Called by the usb core when driver is unloaded or device is removed */ +static void usb_8dev_disconnect(struct usb_interface *intf) +{ + struct usb_8dev_priv *priv = usb_get_intfdata(intf); + + usb_set_intfdata(intf, NULL); + + if (priv) { + netdev_info(priv->netdev, "device disconnected\n"); + + 
unregister_netdev(priv->netdev); + free_candev(priv->netdev); + + unlink_all_urbs(priv); + } + +} + +static struct usb_driver usb_8dev_driver = { + .name = "usb_8dev", + .probe = usb_8dev_probe, + .disconnect = usb_8dev_disconnect, + .id_table = usb_8dev_table, +}; + +module_usb_driver(usb_8dev_driver); + +MODULE_AUTHOR("Bernd Krumboeck <krumboeck@universalnet.at>"); +MODULE_DESCRIPTION("CAN driver for 8 devices USB2CAN interfaces"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 021d69c5d9bc..29e272cc7a98 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1448,10 +1448,10 @@ static int e100_set_settings(struct net_device *dev, static void e100_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1); - strncpy(info->version, "$Revision: 1.31 $", sizeof(info->version) - 1); - strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1); - strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1); + strlcpy(info->driver, "ETRAX 100LX", sizeof(info->driver)); + strlcpy(info->version, "$Revision: 1.31 $", sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); } static int e100_nway_reset(struct net_device *dev) diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 325391d19bad..7a54ec04b418 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -8,6 +8,8 @@ * (at your option) any later version. */ +#include <linux/delay.h> +#include <linux/jiffies.h> #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -66,36 +68,30 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) { int i; int ret; + unsigned long timeout; - /* - * Set all ports to the disabled state. - */ + /* Set all ports to the disabled state. */ for (i = 0; i < 6; i++) { ret = REG_READ(REG_PORT(i), 0x04); REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); } - /* - * Wait for transmit queues to drain. - */ - msleep(2); + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); - /* - * Reset the switch. - */ + /* Reset the switch. */ REG_WRITE(REG_GLOBAL, 0x0a, 0xa130); - /* - * Wait up to one second for reset to complete. - */ - for (i = 0; i < 1000; i++) { + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { ret = REG_READ(REG_GLOBAL, 0x00); if ((ret & 0x8000) == 0x0000) break; - msleep(1); + usleep_range(1000, 2000); } - if (i == 1000) + if (time_after(jiffies, timeout)) return -ETIMEDOUT; return 0; @@ -103,15 +99,13 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) static int mv88e6060_setup_global(struct dsa_switch *ds) { - /* - * Disable discarding of frames with excessive collisions, + /* Disable discarding of frames with excessive collisions, * set the maximum frame size to 1536 bytes, and mask all * interrupt sources. */ REG_WRITE(REG_GLOBAL, 0x04, 0x0800); - /* - * Enable automatic address learning, set the address + /* Enable automatic address learning, set the address * database size to 1024 entries, and set the default aging * time to 5 minutes. 
*/ @@ -124,16 +118,14 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) { int addr = REG_PORT(p); - /* - * Do not force flow control, disable Ingress and Egress + /* Do not force flow control, disable Ingress and Egress * Header tagging, disable VLAN tunneling, and set the port * state to Forwarding. Additionally, if this is the CPU * port, enable Ingress and Egress Trailer tagging mode. */ REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003); - /* - * Port based VLAN map: give each port its own address + /* Port based VLAN map: give each port its own address * database, allow the CPU port to talk to each of the 'real' * ports, and allow each of the 'real' ports to only talk to * the CPU port. @@ -144,8 +136,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) ds->phys_port_mask : (1 << ds->dst->cpu_port))); - /* - * Port Association Vector: when learning source addresses + /* Port Association Vector: when learning source addresses * of packets, add the address to the address database using * a port bitmap that has only the bit for this port set and * the other bits clear. @@ -245,7 +236,7 @@ static void mv88e6060_poll_link(struct dsa_switch *ds) if (!link) { if (netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link down\n", dev->name); + netdev_info(dev, "link down\n"); netif_carrier_off(dev); } continue; @@ -256,10 +247,11 @@ static void mv88e6060_poll_link(struct dsa_switch *ds) fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0; if (!netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " - "flow control %sabled\n", dev->name, - speed, duplex ? "full" : "half", - fc ? "en" : "dis"); + netdev_info(dev, + "link up, %d Mb/s, %s duplex, flow control %sabled\n", + speed, + duplex ? "full" : "half", + fc ? "en" : "dis"); netif_carrier_on(dev); } } diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c index c17c75b9f531..41ee5b6ae917 100644 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -8,6 +8,8 @@ * (at your option) any later version. */ +#include <linux/delay.h> +#include <linux/jiffies.h> #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -50,36 +52,30 @@ static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds) { int i; int ret; + unsigned long timeout; - /* - * Set all ports to the disabled state. - */ + /* Set all ports to the disabled state. */ for (i = 0; i < 8; i++) { ret = REG_READ(REG_PORT(i), 0x04); REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); } - /* - * Wait for transmit queues to drain. - */ - msleep(2); + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); - /* - * Reset the switch. - */ + /* Reset the switch. */ REG_WRITE(REG_GLOBAL, 0x04, 0xc400); - /* - * Wait up to one second for reset to complete. - */ - for (i = 0; i < 1000; i++) { + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { ret = REG_READ(REG_GLOBAL, 0x00); if ((ret & 0xc800) == 0xc800) break; - msleep(1); + usleep_range(1000, 2000); } - if (i == 1000) + if (time_after(jiffies, timeout)) return -ETIMEDOUT; return 0; @@ -90,54 +86,45 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) int ret; int i; - /* - * Disable the PHY polling unit (since there won't be any + /* Disable the PHY polling unit (since there won't be any * external PHYs to poll), don't discard packets with * excessive collisions, and mask all interrupt sources. 
*/ REG_WRITE(REG_GLOBAL, 0x04, 0x0000); - /* - * Set the default address aging time to 5 minutes, and + /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message * ports. */ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); - /* - * Configure the priority mapping registers. - */ + /* Configure the priority mapping registers. */ ret = mv88e6xxx_config_prio(ds); if (ret < 0) return ret; - /* - * Configure the upstream port, and configure the upstream + /* Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames * are to be sent. */ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); - /* - * Disable remote management for now, and set the switch's + /* Disable remote management for now, and set the switch's * DSA device number. */ REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); - /* - * Send all frames with destination addresses matching + /* Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); - /* - * Send all frames with destination addresses matching + /* Send all frames with destination addresses matching * 01:80:c2:00:00:0x to the CPU port. */ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); - /* - * Disable the loopback filter, disable flow control + /* Disable the loopback filter, disable flow control * messages, disable flood broadcast override, disable * removing of provider tags, disable ATU age violation * interrupts, disable tag flow control, force flow @@ -146,9 +133,7 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) */ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); - /* - * Program the DSA routing table. - */ + /* Program the DSA routing table. */ for (i = 0; i < 32; i++) { int nexthop; @@ -159,33 +144,24 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); } - /* - * Clear all trunk masks. - */ + /* Clear all trunk masks. */ for (i = 0; i < 8; i++) REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff); - /* - * Clear all trunk mappings. - */ + /* Clear all trunk mappings. */ for (i = 0; i < 16; i++) REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); - /* - * Disable ingress rate limiting by resetting all ingress + /* Disable ingress rate limiting by resetting all ingress * rate limit registers to their initial state. */ for (i = 0; i < 6; i++) REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); - /* - * Initialise cross-chip port VLAN table to reset defaults. - */ + /* Initialise cross-chip port VLAN table to reset defaults. */ REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); - /* - * Clear the priority override table. - */ + /* Clear the priority override table. */ for (i = 0; i < 16; i++) REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); @@ -199,8 +175,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) int addr = REG_PORT(p); u16 val; - /* - * MAC Forcing register: don't force link, speed, duplex + /* MAC Forcing register: don't force link, speed, duplex * or flow control state to any particular values on physical * ports, but force the CPU port and all DSA ports to 1000 Mb/s * full duplex. 
@@ -210,15 +185,13 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) else REG_WRITE(addr, 0x01, 0x0003); - /* - * Do not limit the period of time that this port can be + /* Do not limit the period of time that this port can be * paused for by the remote end or the period of time that * this port can pause the remote end. */ REG_WRITE(addr, 0x02, 0x0000); - /* - * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, * disable Header mode, enable IGMP/MLD snooping, disable VLAN * tunneling, determine priority by looking at 802.1p and IP * priority fields (IP prio has precedence), and set STP state @@ -245,14 +218,12 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) val |= 0x000c; REG_WRITE(addr, 0x04, val); - /* - * Port Control 1: disable trunking. Also, if this is the + /* Port Control 1: disable trunking. Also, if this is the * CPU port, enable learn messages to be sent to this port. */ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); - /* - * Port based VLAN map: give each port its own address + /* Port based VLAN map: give each port its own address * database, allow the CPU port to talk to each of the 'real' * ports, and allow each of the 'real' ports to only talk to * the upstream port. @@ -264,14 +235,12 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) val |= 1 << dsa_upstream_port(ds); REG_WRITE(addr, 0x06, val); - /* - * Default VLAN ID and priority: don't set a default VLAN + /* Default VLAN ID and priority: don't set a default VLAN * ID, and set the default packet priority to zero. */ REG_WRITE(addr, 0x07, 0x0000); - /* - * Port Control 2: don't force a good FCS, set the maximum + /* Port Control 2: don't force a good FCS, set the maximum * frame size to 10240 bytes, don't let the switch add or * strip 802.1q tags, don't discard tagged or untagged frames * on this port, do a destination address lookup on all @@ -281,48 +250,36 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) */ REG_WRITE(addr, 0x08, 0x2080); - /* - * Egress rate control: disable egress rate control. - */ + /* Egress rate control: disable egress rate control. */ REG_WRITE(addr, 0x09, 0x0001); - /* - * Egress rate control 2: disable egress rate control. - */ + /* Egress rate control 2: disable egress rate control. */ REG_WRITE(addr, 0x0a, 0x0000); - /* - * Port Association Vector: when learning source addresses + /* Port Association Vector: when learning source addresses * of packets, add the address to the address database using * a port bitmap that has only the bit for this port set and * the other bits clear. */ REG_WRITE(addr, 0x0b, 1 << p); - /* - * Port ATU control: disable limiting the number of address + /* Port ATU control: disable limiting the number of address * database entries that this port is allowed to use. */ REG_WRITE(addr, 0x0c, 0x0000); - /* - * Priorit Override: disable DA, SA and VTU priority override. - */ + /* Priority Override: disable DA, SA and VTU priority override. */ REG_WRITE(addr, 0x0d, 0x0000); - /* - * Port Ethertype: use the Ethertype DSA Ethertype value. - */ + /* Port Ethertype: use the Ethertype DSA Ethertype value. */ REG_WRITE(addr, 0x0f, ETH_P_EDSA); - /* - * Tag Remap: use an identity 802.1p prio -> switch prio + /* Tag Remap: use an identity 802.1p prio -> switch prio * mapping. 
*/ REG_WRITE(addr, 0x18, 0x3210); - /* - * Tag Remap 2: use an identity 802.1p prio -> switch prio + /* Tag Remap 2: use an identity 802.1p prio -> switch prio * mapping. */ REG_WRITE(addr, 0x19, 0x7654); diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 55888b06d8b4..dadfafba64e9 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -8,6 +8,8 @@ * (at your option) any later version. */ +#include <linux/delay.h> +#include <linux/jiffies.h> #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -15,9 +17,7 @@ #include <net/dsa.h> #include "mv88e6xxx.h" -/* - * Switch product IDs - */ +/* Switch product IDs */ #define ID_6085 0x04a0 #define ID_6095 0x0950 #define ID_6131 0x1060 @@ -44,36 +44,30 @@ static int mv88e6131_switch_reset(struct dsa_switch *ds) { int i; int ret; + unsigned long timeout; - /* - * Set all ports to the disabled state. - */ + /* Set all ports to the disabled state. */ for (i = 0; i < 11; i++) { ret = REG_READ(REG_PORT(i), 0x04); REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); } - /* - * Wait for transmit queues to drain. - */ - msleep(2); + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); - /* - * Reset the switch. - */ + /* Reset the switch. */ REG_WRITE(REG_GLOBAL, 0x04, 0xc400); - /* - * Wait up to one second for reset to complete. - */ - for (i = 0; i < 1000; i++) { + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { ret = REG_READ(REG_GLOBAL, 0x00); if ((ret & 0xc800) == 0xc800) break; - msleep(1); + usleep_range(1000, 2000); } - if (i == 1000) + if (time_after(jiffies, timeout)) return -ETIMEDOUT; return 0; @@ -84,42 +78,34 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) int ret; int i; - /* - * Enable the PHY polling unit, don't discard packets with + /* Enable the PHY polling unit, don't discard packets with * excessive collisions, use a weighted fair queueing scheme * to arbitrate between packet queues, set the maximum frame * size to 1632, and mask all interrupt sources. */ REG_WRITE(REG_GLOBAL, 0x04, 0x4400); - /* - * Set the default address aging time to 5 minutes, and + /* Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message * ports. */ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); - /* - * Configure the priority mapping registers. - */ + /* Configure the priority mapping registers. */ ret = mv88e6xxx_config_prio(ds); if (ret < 0) return ret; - /* - * Set the VLAN ethertype to 0x8100. - */ + /* Set the VLAN ethertype to 0x8100. */ REG_WRITE(REG_GLOBAL, 0x19, 0x8100); - /* - * Disable ARP mirroring, and configure the upstream port as + /* Disable ARP mirroring, and configure the upstream port as * the port to which ingress and egress monitor frames are to * be sent. */ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0); - /* - * Disable cascade port functionality unless this device + /* Disable cascade port functionality unless this device * is used in a cascade configuration, and set the switch's * DSA device number. */ @@ -128,23 +114,19 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) else REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f)); - /* - * Send all frames with destination addresses matching + /* Send all frames with destination addresses matching * 01:80:c2:00:00:0x to the CPU port. 
*/ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); - /* - * Ignore removed tag data on doubly tagged packets, disable + /* Ignore removed tag data on doubly tagged packets, disable * flow control messages, force flow control priority to the * highest, and send all special multicast frames to the CPU * port at the highest priority. */ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); - /* - * Program the DSA routing table. - */ + /* Program the DSA routing table. */ for (i = 0; i < 32; i++) { int nexthop; @@ -155,20 +137,15 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); } - /* - * Clear all trunk masks. - */ + /* Clear all trunk masks. */ for (i = 0; i < 8; i++) REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff); - /* - * Clear all trunk mappings. - */ + /* Clear all trunk mappings. */ for (i = 0; i < 16; i++) REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); - /* - * Force the priority of IGMP/MLD snoop frames and ARP frames + /* Force the priority of IGMP/MLD snoop frames and ARP frames * to the highest setting. */ REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff); @@ -182,8 +159,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) int addr = REG_PORT(p); u16 val; - /* - * MAC Forcing register: don't force link, speed, duplex + /* MAC Forcing register: don't force link, speed, duplex * or flow control state to any particular values on physical * ports, but force the CPU port and all DSA ports to 1000 Mb/s * (100 Mb/s on 6085) full duplex. @@ -196,8 +172,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) else REG_WRITE(addr, 0x01, 0x0003); - /* - * Port Control: disable Core Tag, disable Drop-on-Lock, + /* Port Control: disable Core Tag, disable Drop-on-Lock, * transmit frames unmodified, disable Header mode, * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN * tunneling, determine priority by looking at 802.1p and @@ -214,8 +189,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) val = 0x0433; if (p == dsa_upstream_port(ds)) { val |= 0x0104; - /* - * On 6085, unknown multicast forward is controlled + /* On 6085, unknown multicast forward is controlled * here rather than in Port Control 2 register. */ if (ps->id == ID_6085) @@ -225,14 +199,12 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) val |= 0x0100; REG_WRITE(addr, 0x04, val); - /* - * Port Control 1: disable trunking. Also, if this is the + /* Port Control 1: disable trunking. Also, if this is the * CPU port, enable learn messages to be sent to this port. */ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); - /* - * Port based VLAN map: give each port its own address + /* Port based VLAN map: give each port its own address * database, allow the CPU port to talk to each of the 'real' * ports, and allow each of the 'real' ports to only talk to * the upstream port. @@ -244,14 +216,12 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) val |= 1 << dsa_upstream_port(ds); REG_WRITE(addr, 0x06, val); - /* - * Default VLAN ID and priority: don't set a default VLAN + /* Default VLAN ID and priority: don't set a default VLAN * ID, and set the default packet priority to zero. 
*/ REG_WRITE(addr, 0x07, 0x0000); - /* - * Port Control 2: don't force a good FCS, don't use + /* Port Control 2: don't force a good FCS, don't use * VLAN-based, source address-based or destination * address-based priority overrides, don't let the switch * add or strip 802.1q tags, don't discard tagged or @@ -264,8 +234,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) * forwarding of unknown multicast addresses. */ if (ps->id == ID_6085) - /* - * on 6085, bits 3:0 are reserved, bit 6 control ARP + /* on 6085, bits 3:0 are reserved, bit 6 control ARP * mirroring, and multicast forward is handled in * Port Control register. */ @@ -277,32 +246,25 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) REG_WRITE(addr, 0x08, val); } - /* - * Rate Control: disable ingress rate limiting. - */ + /* Rate Control: disable ingress rate limiting. */ REG_WRITE(addr, 0x09, 0x0000); - /* - * Rate Control 2: disable egress rate limiting. - */ + /* Rate Control 2: disable egress rate limiting. */ REG_WRITE(addr, 0x0a, 0x0000); - /* - * Port Association Vector: when learning source addresses + /* Port Association Vector: when learning source addresses * of packets, add the address to the address database using * a port bitmap that has only the bit for this port set and * the other bits clear. */ REG_WRITE(addr, 0x0b, 1 << p); - /* - * Tag Remap: use an identity 802.1p prio -> switch prio + /* Tag Remap: use an identity 802.1p prio -> switch prio * mapping. */ REG_WRITE(addr, 0x18, 0x3210); - /* - * Tag Remap 2: use an identity 802.1p prio -> switch prio + /* Tag Remap 2: use an identity 802.1p prio -> switch prio * mapping. */ REG_WRITE(addr, 0x19, 0x7654); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index a2c62c2f30ee..17314ed9456d 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -8,6 +8,8 @@ * (at your option) any later version. */ +#include <linux/delay.h> +#include <linux/jiffies.h> #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -15,8 +17,7 @@ #include <net/dsa.h> #include "mv88e6xxx.h" -/* - * If the switch's ADDR[4:0] strap pins are strapped to zero, it will +/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will * use all 32 SMI bus addresses on its SMI bus, and all switch registers * will be directly accessible on some {device address,register address} * pair. If the ADDR[4:0] pins are not strapped to zero, the switch @@ -48,30 +49,22 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) if (sw_addr == 0) return mdiobus_read(bus, addr, reg); - /* - * Wait for the bus to become free. - */ + /* Wait for the bus to become free. */ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); if (ret < 0) return ret; - /* - * Transmit the read command. - */ + /* Transmit the read command. */ ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg); if (ret < 0) return ret; - /* - * Wait for the read command to complete. - */ + /* Wait for the read command to complete. */ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); if (ret < 0) return ret; - /* - * Read the data. - */ + /* Read the data. */ ret = mdiobus_read(bus, sw_addr, 1); if (ret < 0) return ret; @@ -100,30 +93,22 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, if (sw_addr == 0) return mdiobus_write(bus, addr, reg, val); - /* - * Wait for the bus to become free. - */ + /* Wait for the bus to become free. 
*/ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); if (ret < 0) return ret; - /* - * Transmit the data to write. - */ + /* Transmit the data to write. */ ret = mdiobus_write(bus, sw_addr, 1, val); if (ret < 0) return ret; - /* - * Transmit the write command. - */ + /* Transmit the write command. */ ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg); if (ret < 0) return ret; - /* - * Wait for the write command to complete. - */ + /* Wait for the write command to complete. */ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); if (ret < 0) return ret; @@ -146,9 +131,7 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) int mv88e6xxx_config_prio(struct dsa_switch *ds) { - /* - * Configure the IP ToS mapping registers. - */ + /* Configure the IP ToS mapping registers. */ REG_WRITE(REG_GLOBAL, 0x10, 0x0000); REG_WRITE(REG_GLOBAL, 0x11, 0x0000); REG_WRITE(REG_GLOBAL, 0x12, 0x5555); @@ -158,9 +141,7 @@ int mv88e6xxx_config_prio(struct dsa_switch *ds) REG_WRITE(REG_GLOBAL, 0x16, 0xffff); REG_WRITE(REG_GLOBAL, 0x17, 0xffff); - /* - * Configure the IEEE 802.1p priority mapping register. - */ + /* Configure the IEEE 802.1p priority mapping register. */ REG_WRITE(REG_GLOBAL, 0x18, 0xfa41); return 0; @@ -183,14 +164,10 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) for (i = 0; i < 6; i++) { int j; - /* - * Write the MAC address byte. - */ + /* Write the MAC address byte. */ REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]); - /* - * Wait for the write to complete. - */ + /* Wait for the write to complete. */ for (j = 0; j < 16; j++) { ret = REG_READ(REG_GLOBAL2, 0x0d); if ((ret & 0x8000) == 0) @@ -221,16 +198,17 @@ int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val) static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) { int ret; - int i; + unsigned long timeout; ret = REG_READ(REG_GLOBAL, 0x04); REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000); - for (i = 0; i < 1000; i++) { - ret = REG_READ(REG_GLOBAL, 0x00); - msleep(1); - if ((ret & 0xc000) != 0xc000) - return 0; + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = REG_READ(REG_GLOBAL, 0x00); + usleep_range(1000, 2000); + if ((ret & 0xc000) != 0xc000) + return 0; } return -ETIMEDOUT; @@ -239,16 +217,17 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) static int mv88e6xxx_ppu_enable(struct dsa_switch *ds) { int ret; - int i; + unsigned long timeout; ret = REG_READ(REG_GLOBAL, 0x04); REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000); - for (i = 0; i < 1000; i++) { - ret = REG_READ(REG_GLOBAL, 0x00); - msleep(1); - if ((ret & 0xc000) == 0xc000) - return 0; + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = REG_READ(REG_GLOBAL, 0x00); + usleep_range(1000, 2000); + if ((ret & 0xc000) == 0xc000) + return 0; } return -ETIMEDOUT; @@ -260,11 +239,11 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work); if (mutex_trylock(&ps->ppu_mutex)) { - struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1; + struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1; - if (mv88e6xxx_ppu_enable(ds) == 0) - ps->ppu_disabled = 0; - mutex_unlock(&ps->ppu_mutex); + if (mv88e6xxx_ppu_enable(ds) == 0) + ps->ppu_disabled = 0; + mutex_unlock(&ps->ppu_mutex); } } @@ -282,22 +261,21 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds) mutex_lock(&ps->ppu_mutex); - /* - * If the PHY polling unit is enabled, disable it so that + /* If the PHY polling unit is 
enabled, disable it so that * we can access the PHY registers. If it was already * disabled, cancel the timer that is going to re-enable * it. */ if (!ps->ppu_disabled) { - ret = mv88e6xxx_ppu_disable(ds); - if (ret < 0) { - mutex_unlock(&ps->ppu_mutex); - return ret; - } - ps->ppu_disabled = 1; + ret = mv88e6xxx_ppu_disable(ds); + if (ret < 0) { + mutex_unlock(&ps->ppu_mutex); + return ret; + } + ps->ppu_disabled = 1; } else { - del_timer(&ps->ppu_timer); - ret = 0; + del_timer(&ps->ppu_timer); + ret = 0; } return ret; @@ -307,9 +285,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - /* - * Schedule a timer to re-enable the PHY polling unit. - */ + /* Schedule a timer to re-enable the PHY polling unit. */ mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10)); mutex_unlock(&ps->ppu_mutex); } @@ -331,8 +307,8 @@ int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum) ret = mv88e6xxx_ppu_access_get(ds); if (ret >= 0) { - ret = mv88e6xxx_reg_read(ds, addr, regnum); - mv88e6xxx_ppu_access_put(ds); + ret = mv88e6xxx_reg_read(ds, addr, regnum); + mv88e6xxx_ppu_access_put(ds); } return ret; @@ -345,8 +321,8 @@ int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, ret = mv88e6xxx_ppu_access_get(ds); if (ret >= 0) { - ret = mv88e6xxx_reg_write(ds, addr, regnum, val); - mv88e6xxx_ppu_access_put(ds); + ret = mv88e6xxx_reg_write(ds, addr, regnum, val); + mv88e6xxx_ppu_access_put(ds); } return ret; @@ -380,7 +356,7 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds) if (!link) { if (netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link down\n", dev->name); + netdev_info(dev, "link down\n"); netif_carrier_off(dev); } continue; @@ -404,10 +380,11 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds) fc = (port_status & 0x8000) ? 1 : 0; if (!netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " - "flow control %sabled\n", dev->name, - speed, duplex ? "full" : "half", - fc ? "en" : "dis"); + netdev_info(dev, + "link up, %d Mb/s, %s duplex, flow control %sabled\n", + speed, + duplex ? "full" : "half", + fc ? "en" : "dis"); netif_carrier_on(dev); } } @@ -431,14 +408,10 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port) { int ret; - /* - * Snapshot the hardware statistics counters for this port. - */ + /* Snapshot the hardware statistics counters for this port. */ REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port); - /* - * Wait for the snapshotting to complete. - */ + /* Wait for the snapshotting to complete. */ ret = mv88e6xxx_stats_wait(ds); if (ret < 0) return ret; @@ -502,9 +475,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, return; } - /* - * Read each of the counters. - */ + /* Read each of the counters. */ for (i = 0; i < nr_stats; i++) { struct mv88e6xxx_hw_stat *s = stats + i; u32 low; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index fc2cd7b90e8d..911ede58dd12 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -16,16 +16,14 @@ #define REG_GLOBAL2 0x1c struct mv88e6xxx_priv_state { - /* - * When using multi-chip addressing, this mutex protects + /* When using multi-chip addressing, this mutex protects * access to the indirect access registers. (In single-chip * mode, this mutex is effectively useless.) 
*/ struct mutex smi_mutex; #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU - /* - * Handles automatic disabling and re-enabling of the PHY + /* Handles automatic disabling and re-enabling of the PHY * polling unit. */ struct mutex ppu_mutex; @@ -34,8 +32,7 @@ struct mv88e6xxx_priv_state { struct timer_list ppu_timer; #endif - /* - * This mutex serialises access to the statistics unit. + /* This mutex serialises access to the statistics unit. * Hold this mutex over snapshot + dump sequences. */ struct mutex stats_mutex; @@ -52,7 +49,7 @@ struct mv88e6xxx_hw_stat { int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg); int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg); int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, - int reg, u16 val); + int reg, u16 val); int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val); int mv88e6xxx_config_prio(struct dsa_switch *ds); int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr); diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index c260af5411d0..42aa54af6842 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -100,6 +100,15 @@ static void dummy_dev_uninit(struct net_device *dev) free_percpu(dev->dstats); } +static int dummy_change_carrier(struct net_device *dev, bool new_carrier) +{ + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + return 0; +} + static const struct net_device_ops dummy_netdev_ops = { .ndo_init = dummy_dev_init, .ndo_uninit = dummy_dev_uninit, @@ -108,6 +117,7 @@ static const struct net_device_ops dummy_netdev_ops = { .ndo_set_rx_mode = set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = dummy_get_stats64, + .ndo_change_carrier = dummy_change_carrier, }; static void dummy_setup(struct net_device *dev) diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c deleted file mode 100644 index 2038eaabaea4..000000000000 --- a/drivers/net/ethernet/3com/3c501.c +++ /dev/null @@ -1,896 +0,0 @@ -/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */ -/* - Written 1992,1993,1994 Donald Becker - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - - This is a device driver for the 3Com Etherlink 3c501. - Do not purchase this card, even as a joke. It's performance is horrible, - and it breaks in many ways. - - The original author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Fixed (again!) the missing interrupt locking on TX/RX shifting. - Alan Cox <alan@lxorguk.ukuu.org.uk> - - Removed calls to init_etherdev since they are no longer needed, and - cleaned up modularization just a bit. The driver still allows only - the default address for cards when loaded as a module, but that's - really less braindead than anyone using a 3c501 board. :) - 19950208 (invid@msen.com) - - Added traps for interrupts hitting the window as we clear and TX load - the board. Now getting 150K/second FTP with a 3c501 card. Still playing - with a TX-TX optimisation to see if we can touch 180-200K/second as seems - theoretically maximum. - 19950402 Alan Cox <alan@lxorguk.ukuu.org.uk> - - Cleaned up for 2.3.x because we broke SMP now. - 20000208 Alan Cox <alan@lxorguk.ukuu.org.uk> - - Check up pass for 2.5. 
Nothing significant changed - 20021009 Alan Cox <alan@lxorguk.ukuu.org.uk> - - Fixed zero fill corner case - 20030104 Alan Cox <alan@lxorguk.ukuu.org.uk> - - - For the avoidance of doubt the "preferred form" of this code is one which - is in an open non patent encumbered format. Where cryptographic key signing - forms part of the process of creating an executable the information - including keys needed to generate an equivalently functional executable - are deemed to be part of the source code. - -*/ - - -/** - * DOC: 3c501 Card Notes - * - * Some notes on this thing if you have to hack it. [Alan] - * - * Some documentation is available from 3Com. Due to the boards age - * standard responses when you ask for this will range from 'be serious' - * to 'give it to a museum'. The documentation is incomplete and mostly - * of historical interest anyway. - * - * The basic system is a single buffer which can be used to receive or - * transmit a packet. A third command mode exists when you are setting - * things up. - * - * If it's transmitting it's not receiving and vice versa. In fact the - * time to get the board back into useful state after an operation is - * quite large. - * - * The driver works by keeping the board in receive mode waiting for a - * packet to arrive. When one arrives it is copied out of the buffer - * and delivered to the kernel. The card is reloaded and off we go. - * - * When transmitting lp->txing is set and the card is reset (from - * receive mode) [possibly losing a packet just received] to command - * mode. A packet is loaded and transmit mode triggered. The interrupt - * handler runs different code for transmit interrupts and can handle - * returning to receive mode or retransmissions (yes you have to help - * out with those too). - * - * DOC: Problems - * - * There are a wide variety of undocumented error returns from the card - * and you basically have to kick the board and pray if they turn up. Most - * only occur under extreme load or if you do something the board doesn't - * like (eg touching a register at the wrong time). - * - * The driver is less efficient than it could be. It switches through - * receive mode even if more transmits are queued. If this worries you buy - * a real Ethernet card. - * - * The combination of slow receive restart and no real multicast - * filter makes the board unusable with a kernel compiled for IP - * multicasting in a real multicast environment. That's down to the board, - * but even with no multicast programs running a multicast IP kernel is - * in group 224.0.0.1 and you will therefore be listening to all multicasts. - * One nv conference running over that Ethernet and you can give up. - * - */ - -#define DRV_NAME "3c501" -#define DRV_VERSION "2002/10/09" - - -static const char version[] = - DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n"; - -/* - * Braindamage remaining: - * The 3c501 board. - */ - -#include <linux/module.h> - -#include <linux/kernel.h> -#include <linux/fcntl.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/spinlock.h> -#include <linux/ethtool.h> -#include <linux/delay.h> -#include <linux/bitops.h> - -#include <asm/uaccess.h> -#include <asm/io.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/init.h> - -#include "3c501.h" - -/* - * The boilerplate probe code. 
- */ - -static int io = 0x280; -static int irq = 5; -static int mem_start; - -/** - * el1_probe - probe for a 3c501 - * @dev: The device structure passed in to probe. - * - * This can be called from two places. The network layer will probe using - * a device structure passed in with the probe information completed. For a - * modular driver we use #init_module to fill in our own structure and probe - * for it. - * - * Returns 0 on success. ENXIO if asked not to probe and ENODEV if asked to - * probe and failing to find anything. - */ - -struct net_device * __init el1_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - static const unsigned ports[] = { 0x280, 0x300, 0}; - const unsigned *port; - int err = 0; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - mem_start = dev->mem_start & 7; - } - - if (io > 0x1ff) { /* Check a single specified location. */ - err = el1_probe1(dev, io); - } else if (io != 0) { - err = -ENXIO; /* Don't probe at all. */ - } else { - for (port = ports; *port && el1_probe1(dev, *port); port++) - ; - if (!*port) - err = -ENODEV; - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - release_region(dev->base_addr, EL1_IO_EXTENT); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops el_netdev_ops = { - .ndo_open = el_open, - .ndo_stop = el1_close, - .ndo_start_xmit = el_start_xmit, - .ndo_tx_timeout = el_timeout, - .ndo_set_rx_mode = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/** - * el1_probe1: - * @dev: The device structure to use - * @ioaddr: An I/O address to probe at. - * - * The actual probe. This is iterated over by #el1_probe in order to - * check all the applicable device locations. - * - * Returns 0 for a success, in which case the device is activated, - * EAGAIN if the IRQ is in use by another driver, and ENODEV if the - * board cannot be found. - */ - -static int __init el1_probe1(struct net_device *dev, int ioaddr) -{ - struct net_local *lp; - const char *mname; /* Vendor name */ - unsigned char station_addr[6]; - int autoirq = 0; - int i; - - /* - * Reserve I/O resource for exclusive use by this driver - */ - - if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME)) - return -ENODEV; - - /* - * Read the station address PROM data from the special port. - */ - - for (i = 0; i < 6; i++) { - outw(i, ioaddr + EL1_DATAPTR); - station_addr[i] = inb(ioaddr + EL1_SAPROM); - } - /* - * Check the first three octets of the S.A. for 3Com's prefix, or - * for the Sager NP943 prefix. - */ - - if (station_addr[0] == 0x02 && station_addr[1] == 0x60 && - station_addr[2] == 0x8c) - mname = "3c501"; - else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 && - station_addr[2] == 0xC8) - mname = "NP943"; - else { - release_region(ioaddr, EL1_IO_EXTENT); - return -ENODEV; - } - - /* - * We auto-IRQ by shutting off the interrupt line and letting it - * float high. - */ - - dev->irq = irq; - - if (dev->irq < 2) { - unsigned long irq_mask; - - irq_mask = probe_irq_on(); - inb(RX_STATUS); /* Clear pending interrupts. 
*/ - inb(TX_STATUS); - outb(AX_LOOP + 1, AX_CMD); - - outb(0x00, AX_CMD); - - mdelay(20); - autoirq = probe_irq_off(irq_mask); - - if (autoirq == 0) { - pr_warning("%s probe at %#x failed to detect IRQ line.\n", - mname, ioaddr); - release_region(ioaddr, EL1_IO_EXTENT); - return -EAGAIN; - } - } - - outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */ - dev->base_addr = ioaddr; - memcpy(dev->dev_addr, station_addr, ETH_ALEN); - - if (mem_start & 0xf) - el_debug = mem_start & 0x7; - if (autoirq) - dev->irq = autoirq; - - pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n", - dev->name, mname, dev->base_addr, - autoirq ? "auto":"assigned ", dev->irq); - -#ifdef CONFIG_IP_MULTICAST - pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n"); -#endif - - if (el_debug) - pr_debug("%s", version); - - lp = netdev_priv(dev); - memset(lp, 0, sizeof(struct net_local)); - spin_lock_init(&lp->lock); - - /* - * The EL1-specific entries in the device structure. - */ - - dev->netdev_ops = &el_netdev_ops; - dev->watchdog_timeo = HZ; - dev->ethtool_ops = &netdev_ethtool_ops; - return 0; -} - -/** - * el1_open: - * @dev: device that is being opened - * - * When an ifconfig is issued which changes the device flags to include - * IFF_UP this function is called. It is only called when the change - * occurs, not when the interface remains up. #el1_close will be called - * when it goes down. - * - * Returns 0 for a successful open, or -EAGAIN if someone has run off - * with our interrupt line. - */ - -static int el_open(struct net_device *dev) -{ - int retval; - int ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - unsigned long flags; - - if (el_debug > 2) - pr_debug("%s: Doing el_open()...\n", dev->name); - - retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev); - if (retval) - return retval; - - spin_lock_irqsave(&lp->lock, flags); - el_reset(dev); - spin_unlock_irqrestore(&lp->lock, flags); - - lp->txing = 0; /* Board in RX mode */ - outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */ - netif_start_queue(dev); - return 0; -} - -/** - * el_timeout: - * @dev: The 3c501 card that has timed out - * - * Attempt to restart the board. This is basically a mixture of extreme - * violence and prayer - * - */ - -static void el_timeout(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if (el_debug) - pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n", - dev->name, inb(TX_STATUS), - inb(AX_STATUS), inb(RX_STATUS)); - dev->stats.tx_errors++; - outb(TX_NORM, TX_CMD); - outb(RX_NORM, RX_CMD); - outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */ - outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */ - lp->txing = 0; /* Ripped back in to RX */ - netif_wake_queue(dev); -} - - -/** - * el_start_xmit: - * @skb: The packet that is queued to be sent - * @dev: The 3c501 card we want to throw it down - * - * Attempt to send a packet to a 3c501 card. There are some interesting - * catches here because the 3c501 is an extremely old and therefore - * stupid piece of technology. - * - * If we are handling an interrupt on the other CPU we cannot load a packet - * as we may still be attempting to retrieve the last RX packet buffer. - * - * When a transmit times out we dump the card into control mode and just - * start again. It happens enough that it isn't worth logging. - * - * We avoid holding the spin locks when doing the packet load to the board. 
- * The device is very slow, and its DMA mode is even slower. If we held the - * lock while loading 1500 bytes onto the controller we would drop a lot of - * serial port characters. This requires we do extra locking, but we have - * no real choice. - */ - -static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - - /* - * Avoid incoming interrupts between us flipping txing and flipping - * mode as the driver assumes txing is a faithful indicator of card - * state - */ - - spin_lock_irqsave(&lp->lock, flags); - - /* - * Avoid timer-based retransmission conflicts. - */ - - netif_stop_queue(dev); - - do { - int len = skb->len; - int pad = 0; - int gp_start; - unsigned char *buf = skb->data; - - if (len < ETH_ZLEN) - pad = ETH_ZLEN - len; - - gp_start = 0x800 - (len + pad); - - lp->tx_pkt_start = gp_start; - lp->collisions = 0; - - dev->stats.tx_bytes += skb->len; - - /* - * Command mode with status cleared should [in theory] - * mean no more interrupts can be pending on the card. - */ - - outb_p(AX_SYS, AX_CMD); - inb_p(RX_STATUS); - inb_p(TX_STATUS); - - lp->loading = 1; - lp->txing = 1; - - /* - * Turn interrupts back on while we spend a pleasant - * afternoon loading bytes into the board - */ - - spin_unlock_irqrestore(&lp->lock, flags); - - /* Set rx packet area to 0. */ - outw(0x00, RX_BUF_CLR); - /* aim - packet will be loaded into buffer start */ - outw(gp_start, GP_LOW); - /* load buffer (usual thing each byte increments the pointer) */ - outsb(DATAPORT, buf, len); - if (pad) { - while (pad--) /* Zero fill buffer tail */ - outb(0, DATAPORT); - } - /* the board reuses the same register */ - outw(gp_start, GP_LOW); - - if (lp->loading != 2) { - /* fire ... Trigger xmit. */ - outb(AX_XMIT, AX_CMD); - lp->loading = 0; - if (el_debug > 2) - pr_debug(" queued xmit.\n"); - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - /* A receive upset our load, despite our best efforts */ - if (el_debug > 2) - pr_debug("%s: burped during tx load.\n", dev->name); - spin_lock_irqsave(&lp->lock, flags); - } while (1); -} - -/** - * el_interrupt: - * @irq: Interrupt number - * @dev_id: The 3c501 that burped - * - * Handle the ether interface interrupts. The 3c501 needs a lot more - * hand holding than most cards. In particular we get a transmit interrupt - * with a collision error because the board firmware isn't capable of rewinding - * its own transmit buffer pointers. It can however count to 16 for us. - * - * On the receive side the card is also very dumb. It has no buffering to - * speak of. We simply pull the packet out of its PIO buffer (which is slow) - * and queue it for the kernel. Then we reset the card for the next packet. - * - * We sometimes get surprise interrupts late both because the SMP IRQ delivery - * is message passing and because the card sometimes seems to deliver late. I - * think if it is part way through a receive and the mode is changed it carries - * on receiving and sends us an interrupt. We have to band aid all these cases - * to get a sensible 150kBytes/second performance. Even then you want a small - * TCP window. - */ - -static irqreturn_t el_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr; - int axsr; /* Aux. status reg. */ - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - spin_lock(&lp->lock); - - /* - * What happened ? 
- */ - - axsr = inb(AX_STATUS); - - /* - * Log it - */ - - if (el_debug > 3) - pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr); - - if (lp->loading == 1 && !lp->txing) - pr_warning("%s: Inconsistent state loading while not in tx\n", - dev->name); - - if (lp->txing) { - /* - * Board in transmit mode. May be loading. If we are - * loading we shouldn't have got this. - */ - int txsr = inb(TX_STATUS); - - if (lp->loading == 1) { - if (el_debug > 2) - pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n", - dev->name, txsr, inw(GP_LOW), inw(RX_LOW)); - - /* Force a reload */ - lp->loading = 2; - spin_unlock(&lp->lock); - goto out; - } - if (el_debug > 6) - pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name, - txsr, inw(GP_LOW), inw(RX_LOW)); - - if ((axsr & 0x80) && (txsr & TX_READY) == 0) { - /* - * FIXME: is there a logic to whether to keep - * on trying or reset immediately ? - */ - if (el_debug > 1) - pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n", - dev->name, txsr, axsr, - inw(ioaddr + EL1_DATAPTR), - inw(ioaddr + EL1_RXPTR)); - lp->txing = 0; - netif_wake_queue(dev); - } else if (txsr & TX_16COLLISIONS) { - /* - * Timed out - */ - if (el_debug) - pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name); - outb(AX_SYS, AX_CMD); - lp->txing = 0; - dev->stats.tx_aborted_errors++; - netif_wake_queue(dev); - } else if (txsr & TX_COLLISION) { - /* - * Retrigger xmit. - */ - - if (el_debug > 6) - pr_debug("%s: retransmitting after a collision.\n", dev->name); - /* - * Poor little chip can't reset its own start - * pointer - */ - - outb(AX_SYS, AX_CMD); - outw(lp->tx_pkt_start, GP_LOW); - outb(AX_XMIT, AX_CMD); - dev->stats.collisions++; - spin_unlock(&lp->lock); - goto out; - } else { - /* - * It worked.. we will now fall through and receive - */ - dev->stats.tx_packets++; - if (el_debug > 6) - pr_debug("%s: Tx succeeded %s\n", dev->name, - (txsr & TX_RDY) ? "." : "but tx is busy!"); - /* - * This is safe the interrupt is atomic WRT itself. - */ - lp->txing = 0; - /* In case more to transmit */ - netif_wake_queue(dev); - } - } else { - /* - * In receive mode. - */ - - int rxsr = inb(RX_STATUS); - if (el_debug > 5) - pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n", - dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW)); - /* - * Just reading rx_status fixes most errors. - */ - if (rxsr & RX_MISSED) - dev->stats.rx_missed_errors++; - else if (rxsr & RX_RUNT) { - /* Handled to avoid board lock-up. */ - dev->stats.rx_length_errors++; - if (el_debug > 5) - pr_debug("%s: runt.\n", dev->name); - } else if (rxsr & RX_GOOD) { - /* - * Receive worked. - */ - el_receive(dev); - } else { - /* - * Nothing? Something is broken! - */ - if (el_debug > 2) - pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n", - dev->name, rxsr); - el_reset(dev); - } - } - - /* - * Move into receive mode - */ - - outb(AX_RX, AX_CMD); - outw(0x00, RX_BUF_CLR); - inb(RX_STATUS); /* Be certain that interrupts are cleared. */ - inb(TX_STATUS); - spin_unlock(&lp->lock); -out: - return IRQ_HANDLED; -} - - -/** - * el_receive: - * @dev: Device to pull the packets from - * - * We have a good packet. Well, not really "good", just mostly not broken. - * We must check everything to see if it is good. In particular we occasionally - * get wild packet sizes from the card. If the packet seems sane we PIO it - * off the card and queue it for the protocol layers. 
- */ - -static void el_receive(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int pkt_len; - struct sk_buff *skb; - - pkt_len = inw(RX_LOW); - - if (el_debug > 4) - pr_debug(" el_receive %d.\n", pkt_len); - - if (pkt_len < 60 || pkt_len > 1536) { - if (el_debug) - pr_debug("%s: bogus packet, length=%d\n", - dev->name, pkt_len); - dev->stats.rx_over_errors++; - return; - } - - /* - * Command mode so we can empty the buffer - */ - - outb(AX_SYS, AX_CMD); - skb = netdev_alloc_skb(dev, pkt_len + 2); - - /* - * Start of frame - */ - - outw(0x00, GP_LOW); - if (skb == NULL) { - pr_info("%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - return; - } else { - skb_reserve(skb, 2); /* Force 16 byte alignment */ - /* - * The read increments through the bytes. The interrupt - * handler will fix the pointer when it returns to - * receive mode. - */ - insb(DATAPORT, skb_put(skb, pkt_len), pkt_len); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } -} - -/** - * el_reset: Reset a 3c501 card - * @dev: The 3c501 card about to get zapped - * - * Even resetting a 3c501 isn't simple. When you activate reset it loses all - * its configuration. You must hold the lock when doing this. The function - * cannot take the lock itself as it is callable from the irq handler. - */ - -static void el_reset(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if (el_debug > 2) - pr_info("3c501 reset...\n"); - outb(AX_RESET, AX_CMD); /* Reset the chip */ - /* Aux control, irq and loopback enabled */ - outb(AX_LOOP, AX_CMD); - { - int i; - for (i = 0; i < 6; i++) /* Set the station address. */ - outb(dev->dev_addr[i], ioaddr + i); - } - - outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */ - outb(TX_NORM, TX_CMD); /* tx irq on done, collision */ - outb(RX_NORM, RX_CMD); /* Set Rx commands. */ - inb(RX_STATUS); /* Clear status. */ - inb(TX_STATUS); - lp->txing = 0; -} - -/** - * el1_close: - * @dev: 3c501 card to shut down - * - * Close a 3c501 card. The IFF_UP flag has been cleared by the user via - * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued, - * and then disable the interrupts. Finally we reset the chip. The effects - * of the rest will be cleaned up by #el1_open. Always returns 0 indicating - * a success. - */ - -static int el1_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (el_debug > 2) - pr_info("%s: Shutting down Ethernet card at %#x.\n", - dev->name, ioaddr); - - netif_stop_queue(dev); - - /* - * Free and disable the IRQ. - */ - - free_irq(dev->irq, dev); - outb(AX_RESET, AX_CMD); /* Reset the chip */ - - return 0; -} - -/** - * set_multicast_list: - * @dev: The device to adjust - * - * Set or clear the multicast filter for this adaptor to use the best-effort - * filtering supported. The 3c501 supports only three modes of filtering. - * It always receives broadcasts and packets for itself. You can choose to - * optionally receive all packets, or all multicast packets on top of this. - */ - -static void set_multicast_list(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (dev->flags & IFF_PROMISC) { - outb(RX_PROM, RX_CMD); - inb(RX_STATUS); - } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { - /* Multicast or all multicast is the same */ - outb(RX_MULT, RX_CMD); - inb(RX_STATUS); /* Clear status. 
*/ - } else { - outb(RX_NORM, RX_CMD); - inb(RX_STATUS); - } -} - - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - -#ifdef MODULE - -static struct net_device *dev_3c501; - -module_param(io, int, 0); -module_param(irq, int, 0); -MODULE_PARM_DESC(io, "EtherLink I/O base address"); -MODULE_PARM_DESC(irq, "EtherLink IRQ number"); - -/** - * init_module: - * - * When the driver is loaded as a module this function is called. We fake up - * a device structure with the base I/O and interrupt set as if it were being - * called from Space.c. This minimises the extra code that would otherwise - * be required. - * - * Returns 0 for success or -EIO if a card is not found. Returning an error - * here also causes the module to be unloaded - */ - -int __init init_module(void) -{ - dev_3c501 = el1_probe(-1); - if (IS_ERR(dev_3c501)) - return PTR_ERR(dev_3c501); - return 0; -} - -/** - * cleanup_module: - * - * The module is being unloaded. We unhook our network device from the system - * and then free up the resources we took when the card was found. - */ - -void __exit cleanup_module(void) -{ - struct net_device *dev = dev_3c501; - unregister_netdev(dev); - release_region(dev->base_addr, EL1_IO_EXTENT); - free_netdev(dev); -} - -#endif /* MODULE */ - -MODULE_AUTHOR("Donald Becker, Alan Cox"); -MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/net/ethernet/3com/3c501.h b/drivers/net/ethernet/3com/3c501.h deleted file mode 100644 index 183fd55f03cb..000000000000 --- a/drivers/net/ethernet/3com/3c501.h +++ /dev/null @@ -1,91 +0,0 @@ - -/* - * Index to functions. - */ - -static int el1_probe1(struct net_device *dev, int ioaddr); -static int el_open(struct net_device *dev); -static void el_timeout(struct net_device *dev); -static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t el_interrupt(int irq, void *dev_id); -static void el_receive(struct net_device *dev); -static void el_reset(struct net_device *dev); -static int el1_close(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; - -#define EL1_IO_EXTENT 16 - -#ifndef EL_DEBUG -#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */ -#endif /* Anything above 5 is wordy death! */ -#define debug el_debug -static int el_debug = EL_DEBUG; - -/* - * Board-specific info in netdev_priv(dev). - */ - -struct net_local -{ - int tx_pkt_start; /* The length of the current Tx packet. 
*/ - int collisions; /* Tx collisions this packet */ - int loading; /* Spot buffer load collisions */ - int txing; /* True if card is in TX mode */ - spinlock_t lock; /* Serializing lock */ -}; - - -#define RX_STATUS (ioaddr + 0x06) -#define RX_CMD RX_STATUS -#define TX_STATUS (ioaddr + 0x07) -#define TX_CMD TX_STATUS -#define GP_LOW (ioaddr + 0x08) -#define GP_HIGH (ioaddr + 0x09) -#define RX_BUF_CLR (ioaddr + 0x0A) -#define RX_LOW (ioaddr + 0x0A) -#define RX_HIGH (ioaddr + 0x0B) -#define SAPROM (ioaddr + 0x0C) -#define AX_STATUS (ioaddr + 0x0E) -#define AX_CMD AX_STATUS -#define DATAPORT (ioaddr + 0x0F) -#define TX_RDY 0x08 /* In TX_STATUS */ - -#define EL1_DATAPTR 0x08 -#define EL1_RXPTR 0x0A -#define EL1_SAPROM 0x0C -#define EL1_DATAPORT 0x0f - -/* - * Writes to the ax command register. - */ - -#define AX_OFF 0x00 /* Irq off, buffer access on */ -#define AX_SYS 0x40 /* Load the buffer */ -#define AX_XMIT 0x44 /* Transmit a packet */ -#define AX_RX 0x48 /* Receive a packet */ -#define AX_LOOP 0x0C /* Loopback mode */ -#define AX_RESET 0x80 - -/* - * Normal receive mode written to RX_STATUS. We must intr on short packets - * to avoid bogus rx lockups. - */ - -#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */ -#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */ -#define RX_MULT 0xE8 /* Accept multicast packets. */ -#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */ - -/* - * TX_STATUS register. - */ - -#define TX_COLLISION 0x02 -#define TX_16COLLISIONS 0x04 -#define TX_READY 0x08 - -#define RX_RUNT 0x08 -#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */ -#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */ - diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 633c709b9d99..f36ff99fd394 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -1161,8 +1161,8 @@ el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 59e1e001bc3f..94c656f5a05d 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1542,9 +1542,10 @@ static void set_rx_mode(struct net_device *dev) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx", + dev->base_addr); } static u32 netdev_get_msglevel(struct net_device *dev) diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index ed0feb3cc6fa..1928e2001587 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1293,7 +1293,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); for (i = 0; i < 3; i++) ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 
10]); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); if (print_info) pr_cont(" %pM", dev->dev_addr); /* Unfortunately an all zero eeprom passes the checksum and this diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index eb56174469a7..1c71c763f680 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_3COM bool "3Com devices" default y - depends on ISA || EISA || MCA || PCI || PCMCIA + depends on ISA || EISA || PCI || PCMCIA ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -18,23 +18,9 @@ config NET_VENDOR_3COM if NET_VENDOR_3COM -config EL1 - tristate "3c501 \"EtherLink\" support" - depends on ISA - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. Also, consider buying a - new card, since the 3c501 is slow, broken, and obsolete: you will - have problems. Some people suggest to ping ("man ping") a nearby - machine every minute ("man cron") when using this card. - - To compile this driver as a module, choose M here. The module - will be called 3c501. - config EL3 - tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support" - depends on (ISA || EISA || MCA) + tristate "3c509/3c579 \"EtherLink III\" support" + depends on (ISA || EISA) ---help--- If you have a network (Ethernet) card belonging to the 3Com EtherLinkIII series, say Y and read the Ethernet-HOWTO, available diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile index 1e5382a30ead..74046afab993 100644 --- a/drivers/net/ethernet/3com/Makefile +++ b/drivers/net/ethernet/3com/Makefile @@ -2,7 +2,6 @@ # Makefile for the 3Com Ethernet device drivers # -obj-$(CONFIG_EL1) += 3c501.o obj-$(CONFIG_EL3) += 3c509.o obj-$(CONFIG_3C515) += 3c515.o obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o diff --git a/drivers/net/ethernet/8390/3c503.c b/drivers/net/ethernet/8390/3c503.c deleted file mode 100644 index 49d76bd0dc86..000000000000 --- a/drivers/net/ethernet/8390/3c503.c +++ /dev/null @@ -1,777 +0,0 @@ -/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */ -/* - Written 1992-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - - This driver should work with the 3c503 and 3c503/16. It should be used - in shared memory mode for best performance, although it may also work - in programmed-I/O mode. - - Sources: - EtherLink II Technical Reference Manual, - EtherLink II/16 Technical Reference Manual Supplement, - 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145 - - The Crynwr 3c503 packet driver. - - Changelog: - - Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards. - Paul Gortmaker : multiple card support for module users. - rjohnson@analogic.com : Fix up PIO interface for efficient operation. 
- Jeff Garzik : ethtool support - -*/ - -#define DRV_NAME "3c503" -#define DRV_VERSION "1.10a" -#define DRV_RELDATE "11/17/2001" - - -static const char version[] = - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n"; - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/ethtool.h> - -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/byteorder.h> - -#include "8390.h" -#include "3c503.h" -#define WRD_COUNT 4 - -static int el2_pio_probe(struct net_device *dev); -static int el2_probe1(struct net_device *dev, int ioaddr); - -/* A zero-terminated list of I/O addresses to be probed in PIO mode. */ -static unsigned int netcard_portlist[] __initdata = - { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0}; - -#define EL2_IO_EXTENT 16 - -static int el2_open(struct net_device *dev); -static int el2_close(struct net_device *dev); -static void el2_reset_8390(struct net_device *dev); -static void el2_init_card(struct net_device *dev); -static void el2_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset); -static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static const struct ethtool_ops netdev_ethtool_ops; - - -/* This routine probes for a memory-mapped 3c503 board by looking for - the "location register" at the end of the jumpered boot PROM space. - This works even if a PROM isn't there. - - If the ethercard isn't found there is an optional probe for - ethercard jumpered to programmed-I/O mode. - */ -static int __init do_el2_probe(struct net_device *dev) -{ - int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0}; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return el2_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (addr = addrs; *addr; addr++) { - void __iomem *p = ioremap(*addr, 1); - unsigned base_bits; - int i; - - if (!p) - continue; - base_bits = readb(p); - iounmap(p); - i = ffs(base_bits) - 1; - if (i == -1 || base_bits != (1 << i)) - continue; - if (el2_probe1(dev, netcard_portlist[i]) == 0) - return 0; - dev->irq = irq; - } -#if ! defined(no_probe_nonshared_memory) - return el2_pio_probe(dev); -#else - return -ENODEV; -#endif -} - -/* Try all of the locations that aren't obviously empty. This touches - a lot of locations, and is much riskier than the code above. */ -static int __init -el2_pio_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return el2_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. 
*/ - return -ENXIO; - - for (i = 0; netcard_portlist[i]; i++) { - if (el2_probe1(dev, netcard_portlist[i]) == 0) - return 0; - dev->irq = irq; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init el2_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_el2_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops el2_netdev_ops = { - .ndo_open = el2_open, - .ndo_stop = el2_close, - - .ndo_start_xmit = eip_start_xmit, - .ndo_tx_timeout = eip_tx_timeout, - .ndo_get_stats = eip_get_stats, - .ndo_set_rx_mode = eip_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = eip_poll, -#endif -}; - -/* Probe for the Etherlink II card at I/O port base IOADDR, - returning non-zero on success. If found, set the station - address and memory parameters in DEVICE. */ -static int __init -el2_probe1(struct net_device *dev, int ioaddr) -{ - int i, iobase_reg, membase_reg, saved_406, wordlength, retval; - static unsigned version_printed; - unsigned long vendor_id; - - if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) { - retval = -EBUSY; - goto out; - } - - /* Reset and/or avoid any lurking NE2000 */ - if (inb(ioaddr + 0x408) == 0xff) { - mdelay(1); - retval = -ENODEV; - goto out1; - } - - /* We verify that it's a 3C503 board by checking the first three octets - of its ethernet address. */ - iobase_reg = inb(ioaddr+0x403); - membase_reg = inb(ioaddr+0x404); - /* ASIC location registers should be 0 or have only a single bit set. */ - if ((iobase_reg & (iobase_reg - 1)) || - (membase_reg & (membase_reg - 1))) { - retval = -ENODEV; - goto out1; - } - saved_406 = inb_p(ioaddr + 0x406); - outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */ - outb_p(ECNTRL_THIN, ioaddr + 0x406); - /* Map the station addr PROM into the lower I/O ports. We now check - for both the old and new 3Com prefix */ - outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406); - vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2); - if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) { - /* Restore the register we frobbed. */ - outb(saved_406, ioaddr + 0x406); - retval = -ENODEV; - goto out1; - } - - if (ei_debug && version_printed++ == 0) - pr_debug("%s", version); - - dev->base_addr = ioaddr; - - pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr); - - /* Retrieve and print the ethernet address. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + i); - pr_cont("%pM", dev->dev_addr); - - /* Map the 8390 back into the window. */ - outb(ECNTRL_THIN, ioaddr + 0x406); - - /* Check for EL2/16 as described in tech. man. */ - outb_p(E8390_PAGE0, ioaddr + E8390_CMD); - outb_p(0, ioaddr + EN0_DCFG); - outb_p(E8390_PAGE2, ioaddr + E8390_CMD); - wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS; - outb_p(E8390_PAGE0, ioaddr + E8390_CMD); - - /* Probe for, turn on and clear the board's shared memory. */ - if (ei_debug > 2) - pr_cont(" memory jumpers %2.2x ", membase_reg); - outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */ - - /* This should be probed for (or set via an ioctl()) at run-time. 
- Right now we use a sleazy hack to pass in the interface number - at boot-time via the low bits of the mem_end field. That value is - unused, and the low bits would be discarded even if it was used. */ -#if defined(EI8390_THICK) || defined(EL2_AUI) - ei_status.interface_num = 1; -#else - ei_status.interface_num = dev->mem_end & 0xf; -#endif - pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex"); - - if ((membase_reg & 0xf0) == 0) { - dev->mem_start = 0; - ei_status.name = "3c503-PIO"; - ei_status.mem = NULL; - } else { - dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) + - ((membase_reg & 0xA0) ? 0x4000 : 0); -#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256 - ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE); - -#ifdef EL2MEMTEST - /* This has never found an error, but someone might care. - Note that it only tests the 2nd 8kB on 16kB 3c503/16 - cards between card addr. 0x2000 and 0x3fff. */ - { /* Check the card's memory. */ - void __iomem *mem_base = ei_status.mem; - unsigned int test_val = 0xbbadf00d; - writel(0xba5eba5e, mem_base); - for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) { - writel(test_val, mem_base + i); - if (readl(mem_base) != 0xba5eba5e || - readl(mem_base + i) != test_val) { - pr_warning("3c503: memory failure or memory address conflict.\n"); - dev->mem_start = 0; - ei_status.name = "3c503-PIO"; - iounmap(mem_base); - ei_status.mem = NULL; - break; - } - test_val += 0x55555555; - writel(0, mem_base + i); - } - } -#endif /* EL2MEMTEST */ - - if (dev->mem_start) - dev->mem_end = dev->mem_start + EL2_MEMSIZE; - - if (wordlength) { /* No Tx pages to skip over to get to Rx */ - ei_status.priv = 0; - ei_status.name = "3c503/16"; - } else { - ei_status.priv = TX_PAGES * 256; - ei_status.name = "3c503"; - } - } - - /* - Divide up the memory on the card. This is the same regardless of - whether shared-mem or PIO is used. For 16 bit cards (16kB RAM), - we use the entire 8k of bank1 for an Rx ring. We only use 3k - of the bank0 for 2 full size Tx packet slots. For 8 bit cards, - (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining - 5kB for an Rx ring. */ - - if (wordlength) { - ei_status.tx_start_page = EL2_MB0_START_PG; - ei_status.rx_start_page = EL2_MB1_START_PG; - } else { - ei_status.tx_start_page = EL2_MB1_START_PG; - ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES; - } - - /* Finish setting the board's parameters. 
*/ - ei_status.stop_page = EL2_MB1_STOP_PG; - ei_status.word16 = wordlength; - ei_status.reset_8390 = el2_reset_8390; - ei_status.get_8390_hdr = el2_get_8390_hdr; - ei_status.block_input = el2_block_input; - ei_status.block_output = el2_block_output; - - if (dev->irq == 2) - dev->irq = 9; - else if (dev->irq > 5 && dev->irq != 9) { - pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n", - dev->irq); - dev->irq = 0; - } - - ei_status.saved_irq = dev->irq; - - dev->netdev_ops = &el2_netdev_ops; - dev->ethtool_ops = &netdev_ethtool_ops; - - retval = register_netdev(dev); - if (retval) - goto out1; - - if (dev->mem_start) - pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n", - dev->name, ei_status.name, (wordlength+1)<<3, - dev->mem_start, dev->mem_end-1); - - else - { - ei_status.tx_start_page = EL2_MB1_START_PG; - ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES; - pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n", - dev->name, ei_status.name, (wordlength+1)<<3); - } - release_region(ioaddr + 0x400, 8); - return 0; -out1: - release_region(ioaddr + 0x400, 8); -out: - release_region(ioaddr, EL2_IO_EXTENT); - return retval; -} - -static irqreturn_t el2_probe_interrupt(int irq, void *seen) -{ - *(bool *)seen = true; - return IRQ_HANDLED; -} - -static int -el2_open(struct net_device *dev) -{ - int retval; - - if (dev->irq < 2) { - static const int irqlist[] = {5, 9, 3, 4, 0}; - const int *irqp = irqlist; - - outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */ - do { - bool seen; - - retval = request_irq(*irqp, el2_probe_interrupt, 0, - dev->name, &seen); - if (retval == -EBUSY) - continue; - if (retval < 0) - goto err_disable; - - /* Twinkle the interrupt, and check if it's seen. */ - seen = false; - smp_wmb(); - outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); - outb_p(0x00, E33G_IDCFR); - msleep(1); - free_irq(*irqp, &seen); - if (!seen) - continue; - - retval = request_irq(dev->irq = *irqp, eip_interrupt, 0, - dev->name, dev); - if (retval == -EBUSY) - continue; - if (retval < 0) - goto err_disable; - break; - } while (*++irqp); - - if (*irqp == 0) { - err_disable: - outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ - return -EAGAIN; - } - } else { - if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { - return retval; - } - } - - el2_init_card(dev); - eip_open(dev); - return 0; -} - -static int -el2_close(struct net_device *dev) -{ - free_irq(dev->irq, dev); - dev->irq = ei_status.saved_irq; - outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ - - eip_close(dev); - return 0; -} - -/* This is called whenever we have a unrecoverable failure: - transmit timeout - Bad ring buffer packet header - */ -static void -el2_reset_8390(struct net_device *dev) -{ - if (ei_debug > 1) { - pr_debug("%s: Resetting the 3c503 board...", dev->name); - pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR), - E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR)); - } - outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL); - ei_status.txing = 0; - outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); - el2_init_card(dev); - if (ei_debug > 1) - pr_cont("done\n"); -} - -/* Initialize the 3c503 GA registers after a reset. */ -static void -el2_init_card(struct net_device *dev) -{ - /* Unmap the station PROM and select the DIX or BNC connector. */ - outb_p(ei_status.interface_num==0 ? 
ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); - - /* Set ASIC copy of rx's first and last+1 buffer pages */ - /* These must be the same as in the 8390. */ - outb(ei_status.rx_start_page, E33G_STARTPG); - outb(ei_status.stop_page, E33G_STOPPG); - - /* Point the vector pointer registers somewhere ?harmless?. */ - outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */ - outb(0xff, E33G_VP1); - outb(0x00, E33G_VP0); - /* Turn off all interrupts until we're opened. */ - outb_p(0x00, dev->base_addr + EN0_IMR); - /* Enable IRQs iff started. */ - outb(EGACFR_NORM, E33G_GACFR); - - /* Set the interrupt line. */ - outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR); - outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */ - outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */ - outb_p(0x00, E33G_DMAAL); - return; /* We always succeed */ -} - -/* - * Either use the shared memory (if enabled on the board) or put the packet - * out through the ASIC FIFO. - */ -static void -el2_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - unsigned short int *wrd; - int boguscount; /* timeout counter */ - unsigned short word; /* temporary for better machine code */ - void __iomem *base = ei_status.mem; - - if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */ - outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR); - else - outb(EGACFR_NORM, E33G_GACFR); - - if (base) { /* Shared memory transfer */ - memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8), - buf, count); - outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */ - return; - } - -/* - * No shared memory, put the packet out the other way. - * Set up then start the internal memory transfer to Tx Start Page - */ - - word = (unsigned short)start_page; - outb(word&0xFF, E33G_DMAAH); - outb(word>>8, E33G_DMAAL); - - outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT - | ECNTRL_START, E33G_CNTRL); - -/* - * Here I am going to write data to the FIFO as quickly as possible. - * Note that E33G_FIFOH is defined incorrectly. It is really - * E33G_FIFOL, the lowest port address for both the byte and - * word write. Variable 'count' is NOT checked. Caller must supply a - * valid count. Note that I may write a harmless extra byte to the - * 8390 if the byte-count was not even. - */ - wrd = (unsigned short int *) buf; - count = (count + 1) >> 1; - for(;;) - { - boguscount = 0x1000; - while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) - { - if(!boguscount--) - { - pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name); - el2_reset_8390(dev); - goto blocked; - } - } - if(count > WRD_COUNT) - { - outsw(E33G_FIFOH, wrd, WRD_COUNT); - wrd += WRD_COUNT; - count -= WRD_COUNT; - } - else - { - outsw(E33G_FIFOH, wrd, count); - break; - } - } - blocked:; - outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); -} - -/* Read the 4 byte, page aligned 8390 specific header. */ -static void -el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int boguscount; - void __iomem *base = ei_status.mem; - unsigned short word; - - if (base) { /* Use the shared memory. */ - void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = le16_to_cpu(hdr->count); - return; - } - -/* - * No shared memory, use programmed I/O. 
- */ - - word = (unsigned short)ring_page; - outb(word&0xFF, E33G_DMAAH); - outb(word>>8, E33G_DMAAL); - - outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT - | ECNTRL_START, E33G_CNTRL); - boguscount = 0x1000; - while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) - { - if(!boguscount--) - { - pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name); - memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr)); - el2_reset_8390(dev); - goto blocked; - } - } - insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1); - blocked:; - outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); -} - - -static void -el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int boguscount = 0; - void __iomem *base = ei_status.mem; - unsigned short int *buf; - unsigned short word; - - /* Maybe enable shared memory just be to be safe... nahh.*/ - if (base) { /* Use the shared memory. */ - ring_offset -= (EL2_MB1_START_PG<<8); - if (ring_offset + count > EL2_MEMSIZE) { - /* We must wrap the input move. */ - int semi_count = EL2_MEMSIZE - ring_offset; - memcpy_fromio(skb->data, base + ring_offset, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count); - } else { - memcpy_fromio(skb->data, base + ring_offset, count); - } - return; - } - -/* - * No shared memory, use programmed I/O. - */ - word = (unsigned short) ring_offset; - outb(word>>8, E33G_DMAAH); - outb(word&0xFF, E33G_DMAAL); - - outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT - | ECNTRL_START, E33G_CNTRL); - -/* - * Here I also try to get data as fast as possible. I am betting that I - * can read one extra byte without clobbering anything in the kernel because - * this would only occur on an odd byte-count and allocation of skb->data - * is word-aligned. Variable 'count' is NOT checked. Caller must check - * for a valid count. - * [This is currently quite safe.... but if one day the 3c503 explodes - * you know where to come looking ;)] - */ - - buf = (unsigned short int *) skb->data; - count = (count + 1) >> 1; - for(;;) - { - boguscount = 0x1000; - while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) - { - if(!boguscount--) - { - pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name); - el2_reset_8390(dev); - goto blocked; - } - } - if(count > WRD_COUNT) - { - insw(E33G_FIFOH, buf, WRD_COUNT); - buf += WRD_COUNT; - count -= WRD_COUNT; - } - else - { - insw(E33G_FIFOH, buf, count); - break; - } - } - blocked:; - outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); -} - - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, -}; - -#ifdef MODULE -#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */ - -static struct net_device *dev_el2[MAX_EL2_CARDS]; -static int io[MAX_EL2_CARDS]; -static int irq[MAX_EL2_CARDS]; -static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. 
xcvr */ -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(xcvr, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); -MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)"); -MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. */ -int __init -init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */ - if (do_el2_probe(dev) == 0) { - dev_el2[found++] = dev; - continue; - } - free_netdev(dev); - pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* NB: el2_close() handles free_irq */ - release_region(dev->base_addr, EL2_IO_EXTENT); - if (ei_status.mem) - iounmap(ei_status.mem); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) { - struct net_device *dev = dev_el2[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/3c503.h b/drivers/net/ethernet/8390/3c503.h deleted file mode 100644 index e2367b82a2ec..000000000000 --- a/drivers/net/ethernet/8390/3c503.h +++ /dev/null @@ -1,91 +0,0 @@ -/* Definitions for the 3Com 3c503 Etherlink 2. */ -/* This file is distributed under the GPL. - Many of these names and comments are directly from the Crynwr packet - drivers, which are released under the GPL. */ - -#define EL2H (dev->base_addr + 0x400) -#define EL2L (dev->base_addr) - -/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran - out of available addresses on the first one... */ - -#define OLD_3COM_ID 0x02608c -#define NEW_3COM_ID 0x0020af - -/* Shared memory management parameters. NB: The 8 bit cards have only - one bank (MB1) which serves both Tx and Rx packet space. The 16bit - cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets. - You choose which bank appears in the sh. mem window with EGACFR_MBSn */ - -#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */ -#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */ -#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */ - -/* 3Com 3c503 ASIC registers */ -#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */ -#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */ -#define E33G_DRQCNT (EL2H+2) /* DMA burst count */ -#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */ - /* (non-useful, but it also appears at the end of EPROM space) */ -#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */ -#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */ -#define E33G_CNTRL (EL2H+6) /* Board's main control register */ -#define E33G_STATUS (EL2H+7) /* Status on completions. 
*/ -#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */ - /* (Which IRQ to assert, DMA chan to use) */ -#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */ -#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */ -/* "Vector pointer" - if this address matches a read, the EPROM (rather than - shared RAM) is mapped into memory space. */ -#define E33G_VP2 (EL2H+11) -#define E33G_VP1 (EL2H+12) -#define E33G_VP0 (EL2H+13) -#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */ -#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */ - -/* Bits in E33G_CNTRL register: */ - -#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */ -#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */ -#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */ -#define ECNTRL_SAPROM (0x04) /* Map the station address prom */ -#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */ -#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */ -#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */ -#define ECNTRL_START (0x80) /* Start the DMA logic */ - -/* Bits in E33G_STATUS register: */ - -#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */ -#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */ -#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */ -#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */ -#define ESTAT_DIP (0x08) /* DMA In Progress */ - -/* Bits in E33G_GACFR register: */ - -#define EGACFR_NIM (0x80) /* NIC interrupt mask */ -#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */ -#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */ -#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */ -#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */ -#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */ - -#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */ -#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */ - -/* - MBS2 MBS1 MBS0 Sh. mem windows card mem at: - ---- ---- ---- ----------------------------- - 0 0 0 0x0000 -- bank 0 - 0 0 1 0x2000 -- bank 1 (only choice for 8bit card) - 0 1 0 0x4000 -- bank 2, not used - 0 1 1 0x6000 -- bank 3, not used - -There was going to be a 32k card that used bank 2 and 3, but it -never got produced. - -*/ - - -/* End of 3C503 parameter definitions */ diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index e1219e037c04..1b78ca7a9786 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -6,8 +6,8 @@ config NET_VENDOR_8390 bool "National Semi-conductor 8390 devices" default y depends on NET_VENDOR_NATSEMI && (AMIGA_PCMCIA || PCI || SUPERH || \ - ISA || MCA || EISA || MAC || M32R || MACH_TX49XX || \ - MCA_LEGACY || H8300 || ARM || MIPS || ZORRO || PCMCIA || \ + ISA || MAC || M32R || MACH_TX49XX || \ + H8300 || ARM || MIPS || ZORRO || PCMCIA || \ EXPERIMENTAL) ---help--- If you have a network (Ethernet) card belonging to this class, say Y @@ -21,30 +21,6 @@ config NET_VENDOR_8390 if NET_VENDOR_8390 -config EL2 - tristate "3c503 \"EtherLink II\" support" - depends on ISA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called 3c503. 
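The programmed-I/O paths of the removed 3c503 driver above (el2_get_8390_hdr, el2_block_input) follow one pattern: program the gate-array DMA address, start it, then spin on ESTAT_DPRDY with a bounded counter before every insw() burst so a wedged FIFO cannot hang the machine. A minimal sketch of that inner loop, reusing the register layout from the deleted 3c503.h; the helper name is made up and the 16-word burst stands in for WRD_COUNT, whose value the patch does not show.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>

#define ESTAT_DPRDY	0x80		/* data port ready, as in 3c503.h */

/*
 * Hypothetical helper: read 'words' 16-bit words from the gate-array FIFO.
 * 'el2h' is dev->base_addr + 0x400, i.e. the EL2H register block above.
 */
static int el2_pio_read_words(unsigned long el2h, u16 *buf, int words)
{
	unsigned long status = el2h + 7;	/* E33G_STATUS */
	unsigned long fifo   = el2h + 14;	/* E33G_FIFOH  */

	while (words > 0) {
		int burst = words > 16 ? 16 : words;	/* WRD_COUNT assumed 16 */
		int spins = 0x1000;			/* bounded wait, as in the driver */

		while (!(inb(status) & ESTAT_DPRDY))
			if (!spins--)
				return -EIO;	/* FIFO blocked: caller resets the 8390 */

		insw(fifo, buf, burst);
		buf += burst;
		words -= burst;
	}
	return 0;
}

The caller would still toggle E33G_CNTRL around the transfer exactly as el2_block_input does above.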
- -config AC3200 - tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)" - depends on PCI && (ISA || EISA) && EXPERIMENTAL - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called ac3200. - config PCMCIA_AXNET tristate "Asix AX88190 PCMCIA support" depends on PCMCIA @@ -74,54 +50,6 @@ config AX88796_93CX6 ---help--- Select this if your platform comes with an external 93CX6 eeprom. -config E2100 - tristate "Cabletron E21xx support" - depends on ISA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called e2100. - -config ES3210 - tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)" - depends on PCI && EISA && EXPERIMENTAL - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called es3210. - -config HPLAN_PLUS - tristate "HP PCLAN+ (27247B and 27252A) support" - depends on ISA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called hp-plus. - -config HPLAN - tristate "HP PCLAN (27245 and other 27xxx series) support" - depends on ISA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called hp. - config HYDRA tristate "Hydra support" depends on ZORRO @@ -140,18 +68,6 @@ config ARM_ETHERH If you have an Acorn system with one of these network cards, you should say Y to this option if you wish to use it with Linux. -config LNE390 - tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)" - depends on PCI && EISA && EXPERIMENTAL - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called lne390. - config MAC8390 bool "Macintosh NS 8390 based ethernet cards" depends on MAC @@ -187,11 +103,7 @@ config NE2000 without a specific driver are compatible with NE2000. If you have a PCI NE2000 card however, say N here and Y to "PCI - NE2000 and clone support" under "EISA, VLB, PCI and on board - controllers" below. If you have a NE2000 card and are running on - an MCA system (a bus system used on some IBM PS/2 computers and - laptops), say N here and Y to "NE/2 (ne2000 MCA version) support", - below. + NE2000 and clone support" below. To compile this driver as a module, choose M here. The module will be called ne. @@ -226,19 +138,6 @@ config APNE To compile this driver as a module, choose M here: the module will be called apne. 
-config NE3210 - tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)" - depends on PCI && EISA && EXPERIMENTAL - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. Note that this driver - will NOT WORK for NE3200 cards as they are completely different. - - To compile this driver as a module, choose M here. The module - will be called ne3210. - config PCMCIA_PCNET tristate "NE2000 compatible PCMCIA support" depends on PCMCIA @@ -288,18 +187,6 @@ config ULTRA To compile this driver as a module, choose M here. The module will be called smc-ultra. -config ULTRA32 - tristate "SMC Ultra32 EISA support" - depends on EISA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called smc-ultra32. - config WD80x3 tristate "WD80*3 support" depends on ISA diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile index f43038babf86..588954a79b2a 100644 --- a/drivers/net/ethernet/8390/Makefile +++ b/drivers/net/ethernet/8390/Makefile @@ -3,27 +3,17 @@ # obj-$(CONFIG_MAC8390) += mac8390.o -obj-$(CONFIG_AC3200) += ac3200.o 8390.o obj-$(CONFIG_APNE) += apne.o 8390.o obj-$(CONFIG_ARM_ETHERH) += etherh.o obj-$(CONFIG_AX88796) += ax88796.o -obj-$(CONFIG_E2100) += e2100.o 8390.o -obj-$(CONFIG_EL2) += 3c503.o 8390p.o -obj-$(CONFIG_ES3210) += es3210.o 8390.o -obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o -obj-$(CONFIG_HPLAN) += hp.o 8390p.o obj-$(CONFIG_HYDRA) += hydra.o 8390.o -obj-$(CONFIG_LNE390) += lne390.o 8390.o obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o obj-$(CONFIG_NE2000) += ne.o 8390p.o -obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o -obj-$(CONFIG_NE3210) += ne3210.o 8390.o obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o -obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o obj-$(CONFIG_WD80x3) += wd.o 8390.o obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o diff --git a/drivers/net/ethernet/8390/ac3200.c b/drivers/net/ethernet/8390/ac3200.c deleted file mode 100644 index ccf07942ff6e..000000000000 --- a/drivers/net/ethernet/8390/ac3200.c +++ /dev/null @@ -1,431 +0,0 @@ -/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */ -/* - Written 1993, 1994 by Donald Becker. - Copyright 1993 United States Government as represented by the Director, - National Security Agency. This software may only be used and distributed - according to the terms of the GNU General Public License as modified by SRC, - incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This is driver for the Ansel Communications Model 3200 EISA Ethernet LAN - Adapter. The programming information is from the users manual, as related - by glee@ardnassak.math.clemson.edu. - - Changelog: - - Paul Gortmaker 05/98 : add support for shared mem above 1MB. 
- - */ - -static const char version[] = - "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include <linux/module.h> -#include <linux/eisa.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/init.h> -#include <linux/interrupt.h> - -#include <asm/io.h> -#include <asm/irq.h> - -#include "8390.h" - -#define DRV_NAME "ac3200" - -/* Offsets from the base address. */ -#define AC_NIC_BASE 0x00 -#define AC_SA_PROM 0x16 /* The station address PROM. */ -#define AC_ADDR0 0x00 /* Prefix station address values. */ -#define AC_ADDR1 0x40 -#define AC_ADDR2 0x90 -#define AC_ID_PORT 0xC80 -#define AC_EISA_ID 0x0110d305 -#define AC_RESET_PORT 0xC84 -#define AC_RESET 0x00 -#define AC_ENABLE 0x01 -#define AC_CONFIG 0xC90 /* The configuration port. */ - -#define AC_IO_EXTENT 0x20 - /* Actually accessed is: - * AC_NIC_BASE (0-15) - * AC_SA_PROM (0-5) - * AC_ID_PORT (0-3) - * AC_RESET_PORT - * AC_CONFIG - */ - -/* Decoding of the configuration register. */ -static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; -static int addrmap[8] = -{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 }; -static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"}; - -#define config2irq(configval) config2irqmap[((configval) >> 3) & 7] -#define config2mem(configval) addrmap[(configval) & 7] -#define config2name(configval) port_name[((configval) >> 6) & 3] - -/* First and last 8390 pages. */ -#define AC_START_PG 0x00 /* First page of 8390 TX buffer */ -#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */ - -static int ac_probe1(int ioaddr, struct net_device *dev); - -static int ac_open(struct net_device *dev); -static void ac_reset_8390(struct net_device *dev); -static void ac_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ac_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); -static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); - -static int ac_close_card(struct net_device *dev); - - -/* Probe for the AC3200. - - The AC3200 can be identified by either the EISA configuration registers, - or the unique value in the station address PROM. - */ - -static int __init do_ac3200_probe(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - int irq = dev->irq; - int mem_start = dev->mem_start; - - if (ioaddr > 0x1ff) /* Check a single specified location. */ - return ac_probe1(ioaddr, dev); - else if (ioaddr > 0) /* Don't probe at all. */ - return -ENXIO; - - if ( ! 
EISA_bus) - return -ENXIO; - - for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - if (ac_probe1(ioaddr, dev) == 0) - return 0; - dev->irq = irq; - dev->mem_start = mem_start; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init ac3200_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_ac3200_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops ac_netdev_ops = { - .ndo_open = ac_open, - .ndo_stop = ac_close_card, - - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_rx_mode = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init ac_probe1(int ioaddr, struct net_device *dev) -{ - int i, retval; - - if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - if (inb_p(ioaddr + AC_ID_PORT) == 0xff) { - retval = -ENODEV; - goto out; - } - - if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) { - retval = -ENODEV; - goto out; - } - -#ifndef final_version - printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x," - " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG), - inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1), - inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3)); -#endif - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i); - - printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM", - ioaddr/0x1000, dev->dev_addr); -#if 0 - /* Check the vendor ID/prefix. Redundant after checking the EISA ID */ - if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0 - || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1 - || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) { - printk(", not found (invalid prefix).\n"); - retval = -ENODEV; - goto out; - } -#endif - - /* Assign and allocate the interrupt now. */ - if (dev->irq == 0) { - dev->irq = config2irq(inb(ioaddr + AC_CONFIG)); - printk(", using"); - } else { - dev->irq = irq_canonicalize(dev->irq); - printk(", assigning"); - } - - retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk (" nothing! Unable to get IRQ %d.\n", dev->irq); - goto out; - } - - printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]); - - dev->base_addr = ioaddr; - -#ifdef notyet - if (dev->mem_start) { /* Override the value from the board. */ - for (i = 0; i < 7; i++) - if (addrmap[i] == dev->mem_start) - break; - if (i >= 7) - i = 0; - outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG); - } -#endif - - dev->if_port = inb(ioaddr + AC_CONFIG) >> 6; - dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG)); - - printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n", - dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start); - - /* - * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put - * the card mem within the region covered by `normal' RAM !!! - * - * ioremap() will fail in that case. 
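The removed EISA probes (do_ac3200_probe above, and do_es_probe in es3210.c further down) scan the slot-specific I/O ranges in 0x1000 steps and match the 32-bit EISA ID latched at offset 0xc80 of each slot. A condensed sketch of that scan; the helper and callback names are hypothetical, and the ID value in the comment is the AC_EISA_ID constant shown above.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>

#define EISA_ID_OFFSET	0xc80		/* AC_ID_PORT / ES_ID_PORT above */

static int eisa_scan_for_id(u32 expected_id,
			    int (*probe_one)(unsigned long ioaddr))
{
	unsigned long ioaddr;

	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
		if (inb(ioaddr + EISA_ID_OFFSET) == 0xff)
			continue;		/* empty slot reads back 0xff */
		if (inl(ioaddr + EISA_ID_OFFSET) != expected_id)
			continue;
		if (probe_one(ioaddr) == 0)	/* e.g. ac_probe1(): ID 0x0110d305 */
			return 0;
	}
	return -ENODEV;
}

do_ac3200_probe also restores dev->irq and dev->mem_start between attempts, so a failed slot does not poison the settings used for the next one.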
- */ - ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100); - if (!ei_status.mem) { - printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n"); - printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n"); - printk(KERN_ERR "ac3200.c: Driver NOT installed.\n"); - retval = -EINVAL; - goto out1; - } - printk("ac3200.c: remapped %dkB card memory to virtual address %p\n", - AC_STOP_PG/4, ei_status.mem); - - dev->mem_start = (unsigned long)ei_status.mem; - dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256; - - ei_status.name = "AC3200"; - ei_status.tx_start_page = AC_START_PG; - ei_status.rx_start_page = AC_START_PG + TX_PAGES; - ei_status.stop_page = AC_STOP_PG; - ei_status.word16 = 1; - - if (ei_debug > 0) - printk(version); - - ei_status.reset_8390 = &ac_reset_8390; - ei_status.block_input = &ac_block_input; - ei_status.block_output = &ac_block_output; - ei_status.get_8390_hdr = &ac_get_8390_hdr; - - dev->netdev_ops = &ac_netdev_ops; - NS8390_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out2; - return 0; -out2: - if (ei_status.reg0) - iounmap(ei_status.mem); -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr, AC_IO_EXTENT); - return retval; -} - -static int ac_open(struct net_device *dev) -{ -#ifdef notyet - /* Someday we may enable the IRQ and shared memory here. */ - int ioaddr = dev->base_addr; -#endif - - ei_open(dev); - return 0; -} - -static void ac_reset_8390(struct net_device *dev) -{ - ushort ioaddr = dev->base_addr; - - outb(AC_RESET, ioaddr + AC_RESET_PORT); - if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies); - - ei_status.txing = 0; - outb(AC_ENABLE, ioaddr + AC_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void -ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); -} - -/* Block input and output are easy on shared memory ethercards, the only - complication is when the ring buffer wraps. */ - -static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256; - - if (ring_offset + count > AC_STOP_PG*256) { - /* We must wrap the input move. */ - int semi_count = AC_STOP_PG*256 - ring_offset; - memcpy_fromio(skb->data, start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, - ei_status.mem + TX_PAGES*256, count); - } else { - memcpy_fromio(skb->data, start, count); - } -} - -static void ac_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8); - - memcpy_toio(shmem, buf, count); -} - -static int ac_close_card(struct net_device *dev) -{ - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - -#ifdef notyet - /* We should someday disable shared memory and interrupts. */ - outb(0x00, ioaddr + 6); /* Disable interrupts. 
*/ - free_irq(dev->irq, dev); -#endif - - ei_close(dev); - return 0; -} - -#ifdef MODULE -#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */ -static struct net_device *dev_ac32[MAX_AC32_CARDS]; -static int io[MAX_AC32_CARDS]; -static int irq[MAX_AC32_CARDS]; -static int mem[MAX_AC32_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, "Memory base address(es)"); -MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); -MODULE_LICENSE("GPL"); - -static int __init ac3200_module_init(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) { - if (io[this_dev] == 0 && this_dev != 0) - break; - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; /* Currently ignored by driver */ - if (do_ac3200_probe(dev) == 0) { - dev_ac32[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* Someday free_irq may be in ac_close_card() */ - free_irq(dev->irq, dev); - release_region(dev->base_addr, AC_IO_EXTENT); - iounmap(ei_status.mem); -} - -static void __exit ac3200_module_exit(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) { - struct net_device *dev = dev_ac32[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -module_init(ac3200_module_init); -module_exit(ac3200_module_exit); -#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 70dba5d01ad3..cab306a9888e 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -358,7 +358,7 @@ static int ax_mii_probe(struct net_device *dev) return -ENODEV; } - ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0, + ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, PHY_INTERFACE_MODE_MII); if (ret) { netdev_err(dev, "Could not attach to PHY\n"); @@ -469,9 +469,9 @@ static void ax_get_drvinfo(struct net_device *dev, { struct platform_device *pdev = to_platform_device(dev->dev.parent); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pdev->name); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); } static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/8390/e2100.c b/drivers/net/ethernet/8390/e2100.c deleted file mode 100644 index ed55ce85ebbf..000000000000 --- a/drivers/net/ethernet/8390/e2100.c +++ /dev/null @@ -1,489 +0,0 @@ -/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */ -/* - Written 1993-1994 by Donald Becker. - - Copyright 1994 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - - This is a driver for the Cabletron E2100 series ethercards. 
- - The Author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - The E2100 series ethercard is a fairly generic shared memory 8390 - implementation. The only unusual aspect is the way the shared memory - registers are set: first you do an inb() in what is normally the - station address region, and the low three bits of next outb() *address* - is used as the write value for that register. Either someone wasn't - too used to dem bit en bites, or they were trying to obfuscate the - programming interface. - - There is an additional complication when setting the window on the packet - buffer. You must first do a read into the packet buffer region with the - low 8 address bits the address setting the page for the start of the packet - buffer window, and then do the above operation. See mem_on() for details. - - One bug on the chip is that even a hard reset won't disable the memory - window, usually resulting in a hung machine if mem_off() isn't called. - If this happens, you must power down the machine for about 30 seconds. -*/ - -static const char version[] = - "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/ioport.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/delay.h> - -#include <asm/io.h> - -#include "8390.h" - -#define DRV_NAME "e2100" - -static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0}; - -/* Offsets from the base_addr. - Read from the ASIC register, and the low three bits of the next outb() - address is used to set the corresponding register. */ -#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */ -#define E21_ASIC 0x10 -#define E21_MEM_ENABLE 0x10 -#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */ -#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */ -#define E21_MEM_BASE 0x11 -#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */ -#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */ -#define E21_MEDIA 0x14 /* (alias). */ -#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */ -#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */ -#define E21_SAPROM 0x10 /* Offset to station address data. */ -#define E21_IO_EXTENT 0x20 - -static inline void mem_on(short port, volatile char __iomem *mem_base, - unsigned char start_page ) -{ - /* This is a little weird: set the shared memory window by doing a - read. The low address bits specify the starting page. */ - readb(mem_base+start_page); - inb(port + E21_MEM_ENABLE); - outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON); -} - -static inline void mem_off(short port) -{ - inb(port + E21_MEM_ENABLE); - outb(0x00, port + E21_MEM_ENABLE); -} - -/* In other drivers I put the TX pages first, but the E2100 window circuitry - is designed to have a 4K Tx region last. The windowing circuitry wraps the - window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring - appear contiguously in the window. 
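The write mechanism described in the comment above (an inb() selects a register, then the low three bits of the *address* of the following outb() become the value written) can be captured in one small helper. This is only a sketch of how the deleted mem_on()/e21_open() code drives the ASIC; the helper name is made up, and the data byte passed to outb() appears not to matter, since the driver writes 0 in some places and E21_MEM_ON in others.

#include <linux/io.h>

#define E21_ASIC	0x10	/* base of the write-by-address window */

/* Hypothetical helper: store a 3-bit value into one E2100 ASIC register. */
static void e21_asic_write(unsigned long ioaddr, unsigned int reg_offset,
			   unsigned int val)
{
	inb(ioaddr + reg_offset);			/* select the register      */
	outb(0, ioaddr + E21_ASIC + (val & 7));		/* value = low address bits */
}

mem_on() above uses this trick to enable the shared-memory window, and e21_open() further down applies the same pattern to E21_IRQ_LOW, E21_IRQ_HIGH and E21_MEM_BASE.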
*/ -#define E21_RX_START_PG 0x00 /* First page of RX buffer */ -#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ -#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */ -#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */ - -static int e21_probe1(struct net_device *dev, int ioaddr); - -static int e21_open(struct net_device *dev); -static void e21_reset_8390(struct net_device *dev); -static void e21_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void e21_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static int e21_open(struct net_device *dev); -static int e21_close(struct net_device *dev); - - -/* Probe for the E2100 series ethercards. These cards have an 8390 at the - base address and the station address at both offset 0x10 and 0x18. I read - the station address from offset 0x18 to avoid the dataport of NE2000 - ethercards, and look for Ctron's unique ID (first three octets of the - station address). - */ - -static int __init do_e2100_probe(struct net_device *dev) -{ - int *port; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return e21_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (port = e21_probe_list; *port; port++) { - dev->irq = irq; - if (e21_probe1(dev, *port) == 0) - return 0; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init e2100_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_e2100_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops e21_netdev_ops = { - .ndo_open = e21_open, - .ndo_stop = e21_close, - - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_rx_mode = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init e21_probe1(struct net_device *dev, int ioaddr) -{ - int i, status, retval; - unsigned char *station_addr = dev->dev_addr; - static unsigned version_printed; - - if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* First check the station address for the Ctron prefix. */ - if (inb(ioaddr + E21_SAPROM + 0) != 0x00 || - inb(ioaddr + E21_SAPROM + 1) != 0x00 || - inb(ioaddr + E21_SAPROM + 2) != 0x1d) { - retval = -ENODEV; - goto out; - } - - /* Verify by making certain that there is a 8390 at there. */ - outb(E8390_NODMA + E8390_STOP, ioaddr); - udelay(1); /* we want to delay one I/O cycle - which is 2MHz */ - status = inb(ioaddr); - if (status != 0x21 && status != 0x23) { - retval = -ENODEV; - goto out; - } - - /* Read the station address PROM. */ - for (i = 0; i < 6; i++) - station_addr[i] = inb(ioaddr + E21_SAPROM + i); - - inb(ioaddr + E21_MEDIA); /* Point to media selection. */ - outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. 
*/ - - if (ei_debug && version_printed++ == 0) - printk(version); - - for (i = 0; i < 6; i++) - printk(" %02X", station_addr[i]); - - if (dev->irq < 2) { - static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4}; - for (i = 0; i < ARRAY_SIZE(irqlist); i++) - if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) { - dev->irq = irqlist[i]; - break; - } - if (i >= ARRAY_SIZE(irqlist)) { - printk(" unable to get IRQ %d.\n", dev->irq); - retval = -EAGAIN; - goto out; - } - } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */ - dev->irq = 9; - - /* The 8390 is at the base address. */ - dev->base_addr = ioaddr; - - ei_status.name = "E2100"; - ei_status.word16 = 1; - ei_status.tx_start_page = E21_TX_START_PG; - ei_status.rx_start_page = E21_RX_START_PG; - ei_status.stop_page = E21_RX_STOP_PG; - ei_status.saved_irq = dev->irq; - - /* Check the media port used. The port can be passed in on the - low mem_end bits. */ - if (dev->mem_end & 15) - dev->if_port = dev->mem_end & 7; - else { - dev->if_port = 0; - inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */ - for(i = 0; i < 6; i++) - if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) { - dev->if_port = 1; - break; - } - } - - /* Never map in the E21 shared memory unless you are actively using it. - Also, the shared memory has effective only one setting -- spread all - over the 128K region! */ - if (dev->mem_start == 0) - dev->mem_start = 0xd0000; - - ei_status.mem = ioremap(dev->mem_start, 2*1024); - if (!ei_status.mem) { - printk("unable to remap memory\n"); - retval = -EAGAIN; - goto out; - } - -#ifdef notdef - /* These values are unused. The E2100 has a 2K window into the packet - buffer. The window can be set to start on any page boundary. */ - ei_status.rmem_start = dev->mem_start + TX_PAGES*256; - dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024; -#endif - - printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq, - dev->if_port ? "secondary" : "primary", dev->mem_start); - - ei_status.reset_8390 = &e21_reset_8390; - ei_status.block_input = &e21_block_input; - ei_status.block_output = &e21_block_output; - ei_status.get_8390_hdr = &e21_get_8390_hdr; - - dev->netdev_ops = &e21_netdev_ops; - NS8390_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out; - return 0; -out: - release_region(ioaddr, E21_IO_EXTENT); - return retval; -} - -static int -e21_open(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - int retval; - - if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) - return retval; - - /* Set the interrupt line and memory base on the hardware. */ - inb(ioaddr + E21_IRQ_LOW); - outb(0, ioaddr + E21_ASIC + (dev->irq & 7)); - inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */ - outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0) - + (dev->if_port ? E21_ALT_IFPORT : 0)); - inb(ioaddr + E21_MEM_BASE); - outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7)); - - ei_open(dev); - return 0; -} - -static void -e21_reset_8390(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - - outb(0x01, ioaddr); - if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies); - ei_status.txing = 0; - - /* Set up the ASIC registers, just in case something changed them. */ - - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. We put the 2k window so the header page - appears at the start of the shared memory. 
*/ - -static void -e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - - short ioaddr = dev->base_addr; - char __iomem *shared_mem = ei_status.mem; - - mem_on(ioaddr, shared_mem, ring_page); - -#ifdef notdef - /* Officially this is what we are doing, but the readl() is faster */ - memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr)); -#else - ((unsigned int*)hdr)[0] = readl(shared_mem); -#endif - - /* Turn off memory access: we would need to reprogram the window anyway. */ - mem_off(ioaddr); - -} - -/* Block input and output are easy on shared memory ethercards. - The E21xx makes block_input() especially easy by wrapping the top - ring buffer to the bottom automatically. */ -static void -e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - short ioaddr = dev->base_addr; - char __iomem *shared_mem = ei_status.mem; - - mem_on(ioaddr, shared_mem, (ring_offset>>8)); - - memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count); - - mem_off(ioaddr); -} - -static void -e21_block_output(struct net_device *dev, int count, const unsigned char *buf, - int start_page) -{ - short ioaddr = dev->base_addr; - volatile char __iomem *shared_mem = ei_status.mem; - - /* Set the shared memory window start by doing a read, with the low address - bits specifying the starting page. */ - readb(shared_mem + start_page); - mem_on(ioaddr, shared_mem, start_page); - - memcpy_toio(shared_mem, buf, count); - mem_off(ioaddr); -} - -static int -e21_close(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - - free_irq(dev->irq, dev); - dev->irq = ei_status.saved_irq; - - /* Shut off the interrupt line and secondary interface. */ - inb(ioaddr + E21_IRQ_LOW); - outb(0, ioaddr + E21_ASIC); - inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */ - outb(0, ioaddr + E21_ASIC); - - ei_close(dev); - - /* Double-check that the memory has been turned off, because really - really bad things happen if it isn't. */ - mem_off(ioaddr); - - return 0; -} - - -#ifdef MODULE -#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */ -static struct net_device *dev_e21[MAX_E21_CARDS]; -static int io[MAX_E21_CARDS]; -static int irq[MAX_E21_CARDS]; -static int mem[MAX_E21_CARDS]; -static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */ - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -module_param_array(xcvr, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, " memory base address(es)"); -MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)"); -MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. 
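e21_get_8390_hdr() above replaces the "official" memcpy_fromio() with a single readl() because struct e8390_pkt_hdr is exactly one 32-bit word. A sketch of that equivalence; the struct layout is quoted from memory of 8390.h, and the shortcut only keeps the fields in the right order on a little-endian host, which an ISA E2100 machine effectively is.

#include <linux/io.h>
#include <linux/types.h>

struct e8390_pkt_hdr {		/* ring header as the 8390 lays it out */
	u8  status;
	u8  next;		/* page number of the next packet */
	u16 count;		/* header + packet length, little-endian */
};

static void fetch_hdr(void __iomem *win, struct e8390_pkt_hdr *hdr)
{
	/* one bus read instead of memcpy_fromio(hdr, win, 4) */
	*(u32 *)hdr = readl(win);
}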
*/ - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; - dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */ - if (do_e2100_probe(dev) == 0) { - dev_e21[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* NB: e21_close() handles free_irq */ - iounmap(ei_status.mem); - release_region(dev->base_addr, E21_IO_EXTENT); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) { - struct net_device *dev = dev_e21[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c deleted file mode 100644 index ba1b5c95531f..000000000000 --- a/drivers/net/ethernet/8390/es3210.c +++ /dev/null @@ -1,445 +0,0 @@ -/* - es3210.c - - Linux driver for Racal-Interlan ES3210 EISA Network Adapter - - Copyright (C) 1996, Paul Gortmaker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - Information and Code Sources: - - 1) The existing myriad of Linux 8390 drivers written by Donald Becker. - - 2) Once again Russ Nelson's asm packet driver provided additional info. - - 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. - Too bad it doesn't work -- see below. - - The ES3210 is an EISA shared memory NS8390 implementation. Note - that all memory copies to/from the board must be 32bit transfers. - Which rules out using eth_io_copy_and_sum() in this driver. - - Apparently there are two slightly different revisions of the - card, since there are two distinct EISA cfg files (!rii0101.cfg - and !rii0102.cfg) One has media select in the cfg file and the - other doesn't. Hopefully this will work with either. - - That is about all I can tell you about it, having never actually - even seen one of these cards. :) Try http://www.interlan.com - if you want more info. - - Thanks go to Mark Salazar for testing v0.02 of this driver. - - Bugs, to-fix, etc: - - 1) The EISA cfg ports that are *supposed* to have the IRQ and shared - mem values just read 0xff all the time. Hrrmpf. Apparently the - same happens with the packet driver as the code for reading - these registers is disabled there. In the meantime, boot with: - ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and - shared memory detection. (The i/o port detection is okay.) - - 2) Module support currently untested. Probably works though. 
- -*/ - -static const char version[] = - "es3210.c: Driver revision v0.03, 14/09/96\n"; - -#include <linux/module.h> -#include <linux/eisa.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> - -#include <asm/io.h> - -#include "8390.h" - -static int es_probe1(struct net_device *dev, int ioaddr); - -static void es_reset_8390(struct net_device *dev); - -static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); -static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); -static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); - -#define ES_START_PG 0x00 /* First page of TX buffer */ -#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */ - -#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */ -#define ES_ID_PORT 0xc80 /* Same for all EISA cards */ -#define ES_SA_PROM 0xc90 /* Start of e'net addr. */ -#define ES_RESET_PORT 0xc84 /* From the packet driver source */ -#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */ - -#define ES_ADDR0 0x02 /* 3 byte vendor prefix */ -#define ES_ADDR1 0x07 -#define ES_ADDR2 0x01 - -/* - * Two card revisions. EISA ID's are always rev. minor, rev. major,, and - * then the three vendor letters stored in 5 bits each, with an "a" = 1. - * For eg: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA - * config utility determines automagically what config file(s) to use. - */ -#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */ -#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */ - -#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */ -#define ES_CFG2 0xcc1 -#define ES_CFG3 0xcc2 -#define ES_CFG4 0xcc3 -#define ES_CFG5 0xcc4 -#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */ - -/* - * You can OR any of the following bits together and assign it - * to ES_DEBUG to get verbose driver info during operation. - * Some of these don't do anything yet. - */ - -#define ES_D_PROBE 0x01 -#define ES_D_RX_PKT 0x02 -#define ES_D_TX_PKT 0x04 -#define ED_D_IRQ 0x08 - -#define ES_DEBUG 0 - -static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10}; -static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15}; - -/* - * Probe for the card. The best way is to read the EISA ID if it - * is known. Then we check the prefix of the station address - * PROM for a match against the Racal-Interlan assigned value. - */ - -static int __init do_es_probe(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - int irq = dev->irq; - int mem_start = dev->mem_start; - - if (ioaddr > 0x1ff) /* Check a single specified location. */ - return es_probe1(dev, ioaddr); - else if (ioaddr > 0) /* Don't probe at all. */ - return -ENXIO; - - if (!EISA_bus) { -#if ES_DEBUG & ES_D_PROBE - printk("es3210.c: Not EISA bus. Not probing high ports.\n"); -#endif - return -ENXIO; - } - - /* EISA spec allows for up to 16 slots, but 8 is typical. 
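The ES_EISA_ID comment above packs the three manufacturer letters into 5-bit fields with 'A' = 1, so "rii" becomes 10010 01001 01001 = 0x4929. A small sketch of that encoding (the helper name is made up):

#include <linux/types.h>

/* EISA manufacturer code: three letters, 'A' = 1, five bits each. */
static u16 eisa_vendor_code(char a, char b, char c)
{
	return ((a - 'A' + 1) << 10) |
	       ((b - 'A' + 1) << 5)  |
		(c - 'A' + 1);
}

/* eisa_vendor_code('R', 'I', 'I') == 0x4929, as in ES_EISA_ID1/ES_EISA_ID2. */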
*/ - for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - if (es_probe1(dev, ioaddr) == 0) - return 0; - dev->irq = irq; - dev->mem_start = mem_start; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init es_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_es_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int __init es_probe1(struct net_device *dev, int ioaddr) -{ - int i, retval; - unsigned long eisa_id; - - if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210")) - return -ENODEV; - -#if ES_DEBUG & ES_D_PROBE - printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT)); - printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n", - inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3), - inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6)); -#endif - -/* Check the EISA ID of the card. */ - eisa_id = inl(ioaddr + ES_ID_PORT); - if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) { - retval = -ENODEV; - goto out; - } - - for (i = 0; i < ETH_ALEN ; i++) - dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i); - -/* Check the Racal vendor ID as well. */ - if (dev->dev_addr[0] != ES_ADDR0 || - dev->dev_addr[1] != ES_ADDR1 || - dev->dev_addr[2] != ES_ADDR2) { - printk("es3210.c: card not found %pM (invalid_prefix).\n", - dev->dev_addr); - retval = -ENODEV; - goto out; - } - - printk("es3210.c: ES3210 rev. %ld at %#x, node %pM", - eisa_id>>24, ioaddr, dev->dev_addr); - - /* Snarf the interrupt now. */ - if (dev->irq == 0) { - unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07; - unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe; - - if (hi_irq != 0) { - dev->irq = hi_irq_map[hi_irq - 1]; - } else { - int i = 0; - while (lo_irq > (1<<i)) i++; - dev->irq = lo_irq_map[i]; - } - printk(" using IRQ %d", dev->irq); -#if ES_DEBUG & ES_D_PROBE - printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n", - hi_irq, lo_irq, dev->irq); -#endif - } else { - if (dev->irq == 2) - dev->irq = 9; /* Doh! */ - printk(" assigning IRQ %d", dev->irq); - } - - if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) { - printk (" unable to get IRQ %d.\n", dev->irq); - retval = -EAGAIN; - goto out; - } - - if (dev->mem_start == 0) { - unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0; - unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07; - - if (mem_enabled != 0x80) { - printk(" shared mem disabled - giving up\n"); - retval = -ENXIO; - goto out1; - } - dev->mem_start = 0xC0000 + mem_bits*0x4000; - printk(" using "); - } else { - printk(" assigning "); - } - - ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256); - if (!ei_status.mem) { - printk("ioremap failed - giving up\n"); - retval = -ENXIO; - goto out1; - } - - dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256; - - printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1); - -#if ES_DEBUG & ES_D_PROBE - if (inb(ioaddr + ES_CFG5)) - printk("es3210: Warning - DMA channel enabled, but not used here.\n"); -#endif - /* Note, point at the 8390, and not the card... 
*/ - dev->base_addr = ioaddr + ES_NIC_OFFSET; - - ei_status.name = "ES3210"; - ei_status.tx_start_page = ES_START_PG; - ei_status.rx_start_page = ES_START_PG + TX_PAGES; - ei_status.stop_page = ES_STOP_PG; - ei_status.word16 = 1; - - if (ei_debug > 0) - printk(version); - - ei_status.reset_8390 = &es_reset_8390; - ei_status.block_input = &es_block_input; - ei_status.block_output = &es_block_output; - ei_status.get_8390_hdr = &es_get_8390_hdr; - - dev->netdev_ops = &ei_netdev_ops; - NS8390_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT); - return retval; -} - -/* - * Reset as per the packet driver method. Judging by the EISA cfg - * file, this just toggles the "Board Enable" bits (bit 2 and 0). - */ - -static void es_reset_8390(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - unsigned long end; - - outb(0x04, ioaddr + ES_RESET_PORT); - if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name); - - end = jiffies + 2*HZ/100; - while ((signed)(end - jiffies) > 0) continue; - - ei_status.txing = 0; - outb(0x01, ioaddr + ES_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* - * Note: In the following three functions is the implicit assumption - * that the associated memcpy will only use "rep; movsl" as long as - * we keep the counts as some multiple of doublewords. This is a - * requirement of the hardware, and also prevents us from using - * eth_io_copy_and_sum() since we can't guarantee it will limit - * itself to doubleword access. - */ - -/* - * Grab the 8390 specific header. Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. (A single doubleword.) - */ - -static void -es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ -} - -/* - * Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. The count will already - * be rounded up to a doubleword value via es_get_8390_hdr() above. - */ - -static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256; - - if (ring_offset + count > ES_STOP_PG*256) { - /* Packet wraps over end of ring buffer. */ - int semi_count = ES_STOP_PG*256 - ring_offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.mem, count); - } else { - /* Packet is in one chunk. 
*/ - memcpy_fromio(skb->data, xfer_start, count); - } -} - -static void es_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8); - - count = (count + 3) & ~3; /* Round up to doubleword */ - memcpy_toio(shmem, buf, count); -} - -#ifdef MODULE -#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */ -#define NAMELEN 8 /* # of chars for storing dev->name */ -static struct net_device *dev_es3210[MAX_ES_CARDS]; -static int io[MAX_ES_CARDS]; -static int irq[MAX_ES_CARDS]; -static int mem[MAX_ES_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, "memory base address(es)"); -MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); -MODULE_LICENSE("GPL"); - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { - if (io[this_dev] == 0 && this_dev != 0) - break; - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; - if (do_es_probe(dev) == 0) { - dev_es3210[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr, ES_IO_EXTENT); - iounmap(ei_status.mem); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { - struct net_device *dev = dev_es3210[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ - diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c deleted file mode 100644 index 52f70f999c00..000000000000 --- a/drivers/net/ethernet/8390/hp-plus.c +++ /dev/null @@ -1,505 +0,0 @@ -/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */ -/* - Written 1994 by Donald Becker. - - This driver is for the Hewlett Packard PC LAN (27***) plus ethercards. - These cards are sold under several model numbers, usually 2724*. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - As is often the case, a great deal of credit is owed to Russ Nelson. - The Crynwr packet driver was my primary source of HP-specific - programming information. -*/ - -static const char version[] = -"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include <linux/module.h> - -#include <linux/string.h> /* Important -- this inlines word moves. */ -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/delay.h> - -#include <asm/io.h> - -#include "8390.h" - -#define DRV_NAME "hp-plus" - -/* A zero-terminated list of I/O addresses to be probed. 
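The shared-memory receive paths in the drivers above (ac_block_input, es_block_input) share one subtlety: a packet that runs past the end of the 8390 ring must be copied in two pieces, and on the ES3210 every transfer must also be a whole number of 32-bit words. A condensed sketch of that wrap handling; the window geometry constants are placeholders, not values taken from any of the removed drivers.

#include <linux/io.h>
#include <linux/types.h>

#define RING_START	0x0000	/* placeholder: first RX offset in the window */
#define RING_END	0x4000	/* placeholder: one past the last RX offset   */

static void ring_copy_in(void __iomem *win, unsigned int ring_offset,
			 u8 *dst, unsigned int count)
{
	count = (count + 3) & ~3;	/* ES3210-style doubleword rounding */

	if (ring_offset + count > RING_END) {
		unsigned int first = RING_END - ring_offset;

		memcpy_fromio(dst, win + ring_offset, first);
		memcpy_fromio(dst + first, win + RING_START, count - first);
	} else {
		memcpy_fromio(dst, win + ring_offset, count);
	}
}

es_get_8390_hdr() above rounds the count once, which is why es_block_input() can rely on every copy already being a multiple of four bytes.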
*/ -static unsigned int hpplus_portlist[] __initdata = -{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0}; - -/* - The HP EtherTwist chip implementation is a fairly routine DP8390 - implementation. It allows both shared memory and programmed-I/O buffer - access, using a custom interface for both. The programmed-I/O mode is - entirely implemented in the HP EtherTwist chip, bypassing the problem - ridden built-in 8390 facilities used on NE2000 designs. The shared - memory mode is likewise special, with an offset register used to make - packets appear at the shared memory base. Both modes use a base and bounds - page register to hide the Rx ring buffer wrap -- a packet that spans the - end of physical buffer memory appears continuous to the driver. (c.f. the - 3c503 and Cabletron E2100) - - A special note: the internal buffer of the board is only 8 bits wide. - This lays several nasty traps for the unaware: - - the 8390 must be programmed for byte-wide operations - - all I/O and memory operations must work on whole words (the access - latches are serially preloaded and have no byte-swapping ability). - - This board is laid out in I/O space much like the earlier HP boards: - the first 16 locations are for the board registers, and the second 16 are - for the 8390. The board is easy to identify, with both a dedicated 16 bit - ID register and a constant 0x530* value in the upper bits of the paging - register. -*/ - -#define HP_ID 0x00 /* ID register, always 0x4850. */ -#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */ -#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */ -#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */ -#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */ -#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */ -#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */ -#define HP_IO_EXTENT 32 - -#define HP_START_PG 0x00 /* First page of TX buffer */ -#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */ - -/* The register set selected in HP_PAGING. */ -enum PageName { - Perf_Page = 0, /* Normal operation. */ - MAC_Page = 1, /* The ethernet address (+checksum). */ - HW_Page = 2, /* EEPROM-loaded hardware parameters. */ - LAN_Page = 4, /* Transceiver selection, testing, etc. */ - ID_Page = 6 }; - -/* The bit definitions for the HPP_OPTION register. */ -enum HP_Option { - NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */ - EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20, - MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, }; - -static int hpp_probe1(struct net_device *dev, int ioaddr); - -static void hpp_reset_8390(struct net_device *dev); -static int hpp_open(struct net_device *dev); -static int hpp_close(struct net_device *dev); -static void hpp_mem_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void hpp_mem_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void hpp_io_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void hpp_io_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); - - -/* Probe a list of addresses for an HP LAN+ adaptor. - This routine is almost boilerplate. 
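The HP PC-LAN+ multiplexes its upper register bank through HP_PAGING: writing one of the PageName values selects which set (performance registers, MAC address, hardware config, ...) appears at offsets 8-0xf, and the probe code further down brackets every such access with a page select. A minimal sketch of reading the station address that way, using the constants just shown; the helper name is an assumption.

#include <linux/io.h>
#include <linux/types.h>

#define HP_PAGING	0x02	/* page-select register             */
#define MAC_Page	1	/* bank holding the station address */
#define Perf_Page	0	/* normal run-time bank             */

static void hpp_read_mac(unsigned long ioaddr, u8 mac[6])
{
	int i;

	outw(MAC_Page, ioaddr + HP_PAGING);	/* map the MAC bank in       */
	for (i = 0; i < 6; i++)
		mac[i] = inb(ioaddr + 8 + i);	/* address bytes at 8..13    */
	outw(Perf_Page, ioaddr + HP_PAGING);	/* back to the run-time bank */
}

hpp_probe1() below does the same, additionally summing the six bytes plus the byte at offset 14 and expecting a 0xff checksum.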
*/ - -static int __init do_hpp_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return hpp_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (i = 0; hpplus_portlist[i]; i++) { - if (hpp_probe1(dev, hpplus_portlist[i]) == 0) - return 0; - dev->irq = irq; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init hp_plus_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_hpp_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops hpp_netdev_ops = { - .ndo_open = hpp_open, - .ndo_stop = hpp_close, - .ndo_start_xmit = eip_start_xmit, - .ndo_tx_timeout = eip_tx_timeout, - .ndo_get_stats = eip_get_stats, - .ndo_set_rx_mode = eip_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = eip_poll, -#endif -}; - - -/* Do the interesting part of the probe at a single address. */ -static int __init hpp_probe1(struct net_device *dev, int ioaddr) -{ - int i, retval; - unsigned char checksum = 0; - const char name[] = "HP-PC-LAN+"; - int mem_start; - static unsigned version_printed; - - if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* Check for the HP+ signature, 50 48 0x 53. */ - if (inw(ioaddr + HP_ID) != 0x4850 || - (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) { - retval = -ENODEV; - goto out; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - printk("%s: %s at %#3x, ", dev->name, name, ioaddr); - - /* Retrieve and checksum the station address. */ - outw(MAC_Page, ioaddr + HP_PAGING); - - for(i = 0; i < ETH_ALEN; i++) { - unsigned char inval = inb(ioaddr + 8 + i); - dev->dev_addr[i] = inval; - checksum += inval; - } - checksum += inb(ioaddr + 14); - - printk("%pM", dev->dev_addr); - - if (checksum != 0xff) { - printk(" bad checksum %2.2x.\n", checksum); - retval = -ENODEV; - goto out; - } else { - /* Point at the Software Configuration Flags. */ - outw(ID_Page, ioaddr + HP_PAGING); - printk(" ID %4.4x", inw(ioaddr + 12)); - } - - /* Read the IRQ line. */ - outw(HW_Page, ioaddr + HP_PAGING); - { - int irq = inb(ioaddr + 13) & 0x0f; - int option = inw(ioaddr + HPP_OPTION); - - dev->irq = irq; - if (option & MemEnable) { - mem_start = inw(ioaddr + 9) << 8; - printk(", IRQ %d, memory address %#x.\n", irq, mem_start); - } else { - mem_start = 0; - printk(", IRQ %d, programmed-I/O mode.\n", irq); - } - } - - /* Set the wrap registers for string I/O reads. */ - outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); - - /* Set the base address to point to the NIC, not the "real" base! */ - dev->base_addr = ioaddr + NIC_OFFSET; - - dev->netdev_ops = &hpp_netdev_ops; - - ei_status.name = name; - ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! 
*/ - ei_status.tx_start_page = HP_START_PG; - ei_status.rx_start_page = HP_START_PG + TX_PAGES/2; - ei_status.stop_page = HP_STOP_PG; - - ei_status.reset_8390 = &hpp_reset_8390; - ei_status.block_input = &hpp_io_block_input; - ei_status.block_output = &hpp_io_block_output; - ei_status.get_8390_hdr = &hpp_io_get_8390_hdr; - - /* Check if the memory_enable flag is set in the option register. */ - if (mem_start) { - ei_status.block_input = &hpp_mem_block_input; - ei_status.block_output = &hpp_mem_block_output; - ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr; - dev->mem_start = mem_start; - ei_status.mem = ioremap(mem_start, - (HP_STOP_PG - HP_START_PG)*256); - if (!ei_status.mem) { - retval = -ENOMEM; - goto out; - } - ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256; - dev->mem_end = ei_status.rmem_end - = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256; - } - - outw(Perf_Page, ioaddr + HP_PAGING); - NS8390p_init(dev, 0); - /* Leave the 8390 and HP chip reset. */ - outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - iounmap(ei_status.mem); -out: - release_region(ioaddr, HP_IO_EXTENT); - return retval; -} - -static int -hpp_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg; - int retval; - - if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { - return retval; - } - - /* Reset the 8390 and HP chip. */ - option_reg = inw(ioaddr + HPP_OPTION); - outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); - udelay(5); - /* Unreset the board and enable interrupts. */ - outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); - - /* Set the wrap registers for programmed-I/O operation. */ - outw(HW_Page, ioaddr + HP_PAGING); - outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); - - /* Select the operational page. */ - outw(Perf_Page, ioaddr + HP_PAGING); - - return eip_open(dev); -} - -static int -hpp_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - free_irq(dev->irq, dev); - eip_close(dev); - outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset, - ioaddr + HPP_OPTION); - - return 0; -} - -static void -hpp_reset_8390(struct net_device *dev) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies); - - outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); - /* Pause a few cycles for the hardware reset to take place. */ - udelay(5); - ei_status.txing = 0; - outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); - - udelay(5); - - - if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0) - printk("%s: hp_reset_8390() did not complete.\n", dev->name); - - if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); -} - -/* The programmed-I/O version of reading the 4 byte 8390 specific header. - Note that transfer with the EtherTwist+ must be on word boundaries. */ - -static void -hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - - outw((ring_page<<8), ioaddr + HPP_IN_ADDR); - insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); -} - -/* Block input and output, similar to the Crynwr packet driver. 
*/ - -static void -hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - char *buf = skb->data; - - outw(ring_offset, ioaddr + HPP_IN_ADDR); - insw(ioaddr + HP_DATAPORT, buf, count>>1); - if (count & 0x01) - buf[count-1] = inw(ioaddr + HP_DATAPORT); -} - -/* The corresponding shared memory versions of the above 2 functions. */ - -static void -hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - outw((ring_page<<8), ioaddr + HPP_IN_ADDR); - outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); - memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr)); - outw(option_reg, ioaddr + HPP_OPTION); - hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */ -} - -static void -hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - outw(ring_offset, ioaddr + HPP_IN_ADDR); - - outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); - - /* Caution: this relies on get_8390_hdr() rounding up count! - Also note that we *can't* use eth_io_copy_and_sum() because - it will not always copy "count" bytes (e.g. padded IP). */ - - memcpy_fromio(skb->data, ei_status.mem, count); - outw(option_reg, ioaddr + HPP_OPTION); -} - -/* A special note: we *must* always transfer >=16 bit words. - It's always safe to round up, so we do. */ -static void -hpp_io_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - outw(start_page << 8, ioaddr + HPP_OUT_ADDR); - outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2); -} - -static void -hpp_mem_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - outw(start_page << 8, ioaddr + HPP_OUT_ADDR); - outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); - memcpy_toio(ei_status.mem, buf, (count + 3) & ~3); - outw(option_reg, ioaddr + HPP_OPTION); -} - - -#ifdef MODULE -#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */ -static struct net_device *dev_hpp[MAX_HPP_CARDS]; -static int io[MAX_HPP_CARDS]; -static int irq[MAX_HPP_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O port address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected"); -MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. 
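/* [Editorial aside -- not part of the original driver or of this diff.]
 * The rounding rules the programmed-I/O paths above depend on, written as
 * stand-alone helpers.  Because the board's 8-bit buffer sits behind 16-bit
 * latches, every transfer must cover whole words: hpp_io_block_input() moves
 * count>>1 words and fetches one extra word for an odd trailing byte, while
 * hpp_io_block_output() rounds the count up to 32-bit units for outsl().
 * Helper names are invented for illustration only.
 */
#include <stddef.h>

static size_t hpp_words_for(size_t count)	/* 16-bit units to transfer */
{
	return (count + 1) >> 1;
}

static size_t hpp_dwords_for(size_t count)	/* 32-bit units for outsl() */
{
	return (count + 3) >> 2;
}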
*/ -int __init -init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (do_hpp_probe(dev) == 0) { - dev_hpp[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* NB: hpp_close() handles free_irq */ - iounmap(ei_status.mem); - release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { - struct net_device *dev = dev_hpp[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c deleted file mode 100644 index 37fa89aa4578..000000000000 --- a/drivers/net/ethernet/8390/hp.c +++ /dev/null @@ -1,438 +0,0 @@ -/* hp.c: A HP LAN ethernet driver for linux. */ -/* - Written 1993-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This is a driver for the HP PC-LAN adaptors. - - Sources: - The Crynwr packet driver. -*/ - -static const char version[] = - "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/delay.h> - -#include <asm/io.h> - -#include "8390.h" - -#define DRV_NAME "hp" - -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int hppclan_portlist[] __initdata = -{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0}; - -#define HP_IO_EXTENT 32 - -#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */ -#define HP_ID 0x07 -#define HP_CONFIGURE 0x08 /* Configuration register. */ -#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */ -#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */ -#define HP_DATAON 0x10 /* Turn on dataport */ -#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */ - -#define HP_START_PG 0x00 /* First page of TX buffer */ -#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */ -#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. 
*/ - -static int hp_probe1(struct net_device *dev, int ioaddr); - -static void hp_reset_8390(struct net_device *dev); -static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void hp_block_input(struct net_device *dev, int count, - struct sk_buff *skb , int ring_offset); -static void hp_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); - -static void hp_init_card(struct net_device *dev); - -/* The map from IRQ number to HP_CONFIGURE register setting. */ -/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */ -static char irqmap[16] __initdata= { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0}; - - -/* Probe for an HP LAN adaptor. - Also initialize the card and fill in STATION_ADDR with the station - address. */ - -static int __init do_hp_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return hp_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (i = 0; hppclan_portlist[i]; i++) { - if (hp_probe1(dev, hppclan_portlist[i]) == 0) - return 0; - dev->irq = irq; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init hp_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_hp_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int __init hp_probe1(struct net_device *dev, int ioaddr) -{ - int i, retval, board_id, wordmode; - const char *name; - static unsigned version_printed; - - if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* Check for the HP physical address, 08 00 09 xx xx xx. */ - /* This really isn't good enough: we may pick up HP LANCE boards - also! Avoid the lance 0x5757 signature. */ - if (inb(ioaddr) != 0x08 - || inb(ioaddr+1) != 0x00 - || inb(ioaddr+2) != 0x09 - || inb(ioaddr+14) == 0x57) { - retval = -ENODEV; - goto out; - } - - /* Set up the parameters based on the board ID. - If you have additional mappings, please mail them to me -djb. */ - if ((board_id = inb(ioaddr + HP_ID)) & 0x80) { - name = "HP27247"; - wordmode = 1; - } else { - name = "HP27250"; - wordmode = 0; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr); - - for(i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = inb(ioaddr + i); - - printk(" %pM", dev->dev_addr); - - /* Snarf the interrupt now. Someday this could be moved to open(). */ - if (dev->irq < 2) { - static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0}; - static const int irq_8list[] = { 7, 5, 3, 4, 9, 0}; - const int *irqp = wordmode ? irq_16list : irq_8list; - do { - int irq = *irqp; - if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) { - unsigned long cookie = probe_irq_on(); - /* Twinkle the interrupt, and check if it's seen. */ - outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE); - outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE); - if (irq == probe_irq_off(cookie) /* It's a good IRQ line! 
*/ - && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) { - printk(" selecting IRQ %d.\n", irq); - dev->irq = *irqp; - break; - } - } - } while (*++irqp); - if (*irqp == 0) { - printk(" no free IRQ lines.\n"); - retval = -EBUSY; - goto out; - } - } else { - if (dev->irq == 2) - dev->irq = 9; - if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) { - printk (" unable to get IRQ %d.\n", dev->irq); - goto out; - } - } - - /* Set the base address to point to the NIC, not the "real" base! */ - dev->base_addr = ioaddr + NIC_OFFSET; - dev->netdev_ops = &eip_netdev_ops; - - ei_status.name = name; - ei_status.word16 = wordmode; - ei_status.tx_start_page = HP_START_PG; - ei_status.rx_start_page = HP_START_PG + TX_PAGES; - ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG; - - ei_status.reset_8390 = hp_reset_8390; - ei_status.get_8390_hdr = hp_get_8390_hdr; - ei_status.block_input = hp_block_input; - ei_status.block_output = hp_block_output; - hp_init_card(dev); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr, HP_IO_EXTENT); - return retval; -} - -static void -hp_reset_8390(struct net_device *dev) -{ - int hp_base = dev->base_addr - NIC_OFFSET; - int saved_config = inb_p(hp_base + HP_CONFIGURE); - - if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies); - outb_p(0x00, hp_base + HP_CONFIGURE); - ei_status.txing = 0; - /* Pause just a few cycles for the hardware reset to take place. */ - udelay(5); - - outb_p(saved_config, hp_base + HP_CONFIGURE); - udelay(5); - - if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0) - printk("%s: hp_reset_8390() did not complete.\n", dev->name); - - if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); -} - -static void -hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int nic_base = dev->base_addr; - int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); - - outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base); - outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base); - - if (ei_status.word16) - insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); - else - insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); - - outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); -} - -/* Block input and output, similar to the Crynwr packet driver. If you are - porting to a new ethercard look at the packet driver source for hints. - The HP LAN doesn't use shared memory -- we put the packet - out through the "remote DMA" dataport. 
*/ - -static void -hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int nic_base = dev->base_addr; - int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); - int xfer_count = count; - char *buf = skb->data; - - outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base); - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base); - if (ei_status.word16) { - insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1); - if (count & 0x01) - buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++; - } else { - insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count); - } - /* This is for the ALPHA version only, remove for later releases. */ - if (ei_debug > 0) { /* DMA termination address check... */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - int addr = (high << 8) + low; - /* Check only the lower 8 bits so we can ignore ring wrap. */ - if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff)) - printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n", - dev->name, ring_offset + xfer_count, addr); - } - outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); -} - -static void -hp_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - int nic_base = dev->base_addr; - int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); - - outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - if (ei_status.word16 && (count & 0x01)) - count++; - /* We should already be in page 0, but to be safe... */ - outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base); - -#ifdef NE8390_RW_BUGFIX - /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. */ - outb_p(0x42, nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0xff, nic_base + EN0_RSARLO); - outb_p(0x00, nic_base + EN0_RSARHI); -#define NE_CMD 0x00 - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - /* Make certain that the dummy read has occurred. */ - inb_p(0x61); - inb_p(0x61); -#endif - - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(0x00, nic_base + EN0_RSARLO); - outb_p(start_page, nic_base + EN0_RSARHI); - - outb_p(E8390_RWRITE+E8390_START, nic_base); - if (ei_status.word16) { - /* Use the 'rep' sequence for 16 bit boards. */ - outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1); - } else { - outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count); - } - - /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */ - - /* This is for the ALPHA version only, remove for later releases. */ - if (ei_debug > 0) { /* DMA termination address check... */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - int addr = (high << 8) + low; - if ((start_page << 8) + count != addr) - printk("%s: TX Transfer address mismatch, %#4.4x vs. 
%#4.4x.\n", - dev->name, (start_page << 8) + count, addr); - } - outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); -} - -/* This function resets the ethercard if something screws up. */ -static void __init -hp_init_card(struct net_device *dev) -{ - int irq = dev->irq; - NS8390p_init(dev, 0); - outb_p(irqmap[irq&0x0f] | HP_RUN, - dev->base_addr - NIC_OFFSET + HP_CONFIGURE); -} - -#ifdef MODULE -#define MAX_HP_CARDS 4 /* Max number of HP cards per module */ -static struct net_device *dev_hp[MAX_HP_CARDS]; -static int io[MAX_HP_CARDS]; -static int irq[MAX_HP_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); -MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. */ -int __init -init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (do_hp_probe(dev) == 0) { - dev_hp[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) { - struct net_device *dev = dev_hp[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c deleted file mode 100644 index 479409bf2e3c..000000000000 --- a/drivers/net/ethernet/8390/lne390.c +++ /dev/null @@ -1,433 +0,0 @@ -/* - lne390.c - - Linux driver for Mylex LNE390 EISA Network Adapter - - Copyright (C) 1996-1998, Paul Gortmaker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - Information and Code Sources: - - 1) Based upon framework of es3210 driver. - 2) The existing myriad of other Linux 8390 drivers by Donald Becker. - 3) Russ Nelson's asm packet driver provided additional info. - 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. - - The LNE390 is an EISA shared memory NS8390 implementation. Note - that all memory copies to/from the board must be 32bit transfers. - There are two versions of the card: the lne390a and the lne390b. - Going by the EISA cfg files, the "a" has jumpers to select between - BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU. - The shared memory address selection is also slightly different. - Note that shared memory address > 1MB are supported with this driver. - - You can try <http://www.mylex.com> if you want more info, as I've - never even seen one of these cards. 
:) - - Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/01 - - get rid of check_region - - no need to check if dev == NULL in lne390_probe1 -*/ - -static const char *version = - "lne390.c: Driver revision v0.99.1, 01/09/2000\n"; - -#include <linux/module.h> -#include <linux/eisa.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> - -#include <asm/io.h> - -#include "8390.h" - -#define DRV_NAME "lne390" - -static int lne390_probe1(struct net_device *dev, int ioaddr); - -static void lne390_reset_8390(struct net_device *dev); - -static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); -static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); -static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); - -#define LNE390_START_PG 0x00 /* First page of TX buffer */ -#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */ -#define LNE390_IO_EXTENT 0x20 -#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */ -#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */ -#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ - -#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */ -#define LNE390_ADDR1 0x80 -#define LNE390_ADDR2 0xe5 - -#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */ -#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */ - -#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ -#define LNE390_CFG2 0xc90 - -/* - * You can OR any of the following bits together and assign it - * to LNE390_DEBUG to get verbose driver info during operation. - * Currently only the probe one is implemented. - */ - -#define LNE390_D_PROBE 0x01 -#define LNE390_D_RX_PKT 0x02 -#define LNE390_D_TX_PKT 0x04 -#define LNE390_D_IRQ 0x08 - -#define LNE390_DEBUG 0 - -static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; -static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; -static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; - -/* - * Probe for the card. The best way is to read the EISA ID if it - * is known. Then we can check the prefix of the station address - * PROM for a match against the value assigned to Mylex. - */ - -static int __init do_lne390_probe(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - int irq = dev->irq; - int mem_start = dev->mem_start; - int ret; - - if (ioaddr > 0x1ff) { /* Check a single specified location. */ - if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) - return -EBUSY; - ret = lne390_probe1(dev, ioaddr); - if (ret) - release_region(ioaddr, LNE390_IO_EXTENT); - return ret; - } - else if (ioaddr > 0) /* Don't probe at all. */ - return -ENXIO; - - if (!EISA_bus) { -#if LNE390_DEBUG & LNE390_D_PROBE - printk("lne390-debug: Not an EISA bus. Not probing high ports.\n"); -#endif - return -ENXIO; - } - - /* EISA spec allows for up to 16 slots, but 8 is typical. 
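/* [Editorial aside -- not part of the original driver or of this diff.]
 * A stand-alone sketch of how lne390_probe1() below decodes the single CFG2
 * byte into an IRQ line and a shared-memory base, using copies of the
 * irq_map/shmem_map tables above.  "revision" is 0 for the LNE390A and 1 for
 * the LNE390B; the ex_* names are invented for illustration only.
 */
#include <stdint.h>

static const unsigned char ex_irq_map[] = {15, 12, 11, 10, 9, 7, 5, 3};
static const unsigned int ex_shmem_mapA[] =
	{0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
static const unsigned int ex_shmem_mapB[] =
	{0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};

static void lne390_decode_cfg2(uint8_t cfg2, int revision,
			       int *irq, unsigned long *mem_start)
{
	*irq = ex_irq_map[(cfg2 >> 3) & 0x07];		/* bits 3-5: IRQ select */
	*mem_start = (unsigned long)(revision ? ex_shmem_mapB[cfg2 & 0x07]
					      : ex_shmem_mapA[cfg2 & 0x07]) * 0x10000;
}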
*/ - for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) - continue; - if (lne390_probe1(dev, ioaddr) == 0) - return 0; - release_region(ioaddr, LNE390_IO_EXTENT); - dev->irq = irq; - dev->mem_start = mem_start; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init lne390_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_lne390_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int __init lne390_probe1(struct net_device *dev, int ioaddr) -{ - int i, revision, ret; - unsigned long eisa_id; - - if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV; - -#if LNE390_DEBUG & LNE390_D_PROBE - printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT)); - printk("lne390-debug: config regs: %#x %#x\n", - inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2)); -#endif - - -/* Check the EISA ID of the card. */ - eisa_id = inl(ioaddr + LNE390_ID_PORT); - if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) { - return -ENODEV; - } - - revision = (eisa_id >> 24) & 0x01; /* 0 = rev A, 1 rev B */ - -#if 0 -/* Check the Mylex vendor ID as well. Not really required. */ - if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0 - || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1 - || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) { - printk("lne390.c: card not found"); - for (i = 0; i < ETH_ALEN; i++) - printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i)); - printk(" (invalid prefix).\n"); - return -ENODEV; - } -#endif - - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); - printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n", - 0xa+revision, ioaddr/0x1000, dev->dev_addr); - - printk("lne390.c: "); - - /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ - if (dev->irq == 0) { - unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3; - dev->irq = irq_map[irq_reg & 0x07]; - printk("using"); - } else { - /* This is useless unless we reprogram the card here too */ - if (dev->irq == 2) dev->irq = 9; /* Doh! */ - printk("assigning"); - } - printk(" IRQ %d,", dev->irq); - - if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) { - printk (" unable to get IRQ %d.\n", dev->irq); - return ret; - } - - if (dev->mem_start == 0) { - unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07; - - if (revision) /* LNE390B */ - dev->mem_start = shmem_mapB[mem_reg] * 0x10000; - else /* LNE390A */ - dev->mem_start = shmem_mapA[mem_reg] * 0x10000; - printk(" using "); - } else { - /* Should check for value in shmem_map and reprogram the card to use it */ - dev->mem_start &= 0xfff0000; - printk(" assigning "); - } - - printk("%dkB memory at physical address %#lx\n", - LNE390_STOP_PG/4, dev->mem_start); - - /* - BEWARE!! Some dain-bramaged EISA SCUs will allow you to put - the card mem within the region covered by `normal' RAM !!! - - ioremap() will fail in that case. 
- */ - ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100); - if (!ei_status.mem) { - printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n"); - printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n"); - printk(KERN_ERR "lne390.c: Driver NOT installed.\n"); - ret = -EAGAIN; - goto cleanup; - } - printk("lne390.c: remapped %dkB card memory to virtual address %p\n", - LNE390_STOP_PG/4, ei_status.mem); - - dev->mem_start = (unsigned long)ei_status.mem; - dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256; - - /* The 8390 offset is zero for the LNE390 */ - dev->base_addr = ioaddr; - - ei_status.name = "LNE390"; - ei_status.tx_start_page = LNE390_START_PG; - ei_status.rx_start_page = LNE390_START_PG + TX_PAGES; - ei_status.stop_page = LNE390_STOP_PG; - ei_status.word16 = 1; - - if (ei_debug > 0) - printk(version); - - ei_status.reset_8390 = &lne390_reset_8390; - ei_status.block_input = &lne390_block_input; - ei_status.block_output = &lne390_block_output; - ei_status.get_8390_hdr = &lne390_get_8390_hdr; - - dev->netdev_ops = &ei_netdev_ops; - NS8390_init(dev, 0); - - ret = register_netdev(dev); - if (ret) - goto unmap; - return 0; -unmap: - if (ei_status.reg0) - iounmap(ei_status.mem); -cleanup: - free_irq(dev->irq, dev); - return ret; -} - -/* - * Reset as per the packet driver method. Judging by the EISA cfg - * file, this just toggles the "Board Enable" bits (bit 2 and 0). - */ - -static void lne390_reset_8390(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - - outb(0x04, ioaddr + LNE390_RESET_PORT); - if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name); - - mdelay(2); - - ei_status.txing = 0; - outb(0x01, ioaddr + LNE390_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* - * Note: In the following three functions is the implicit assumption - * that the associated memcpy will only use "rep; movsl" as long as - * we keep the counts as some multiple of doublewords. This is a - * requirement of the hardware, and also prevents us from using - * eth_io_copy_and_sum() since we can't guarantee it will limit - * itself to doubleword access. - */ - -/* - * Grab the 8390 specific header. Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. (A single doubleword.) - */ - -static void -lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ -} - -/* - * Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. The count will already - * be rounded up to a doubleword value via lne390_get_8390_hdr() above. - */ - -static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8); - - if (ring_offset + count > (LNE390_STOP_PG<<8)) { - /* Packet wraps over end of ring buffer. */ - int semi_count = (LNE390_STOP_PG<<8) - ring_offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, - ei_status.mem + (TX_PAGES<<8), count); - } else { - /* Packet is in one chunk. 
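/* [Editorial aside -- not part of the original driver or of this diff.]
 * The ring-wrap split performed by lne390_block_input() above, reduced to
 * plain memory: "ring" stands for the receive area of the shared memory and
 * "offset" is relative to the start of that area.  A packet that runs past
 * the end of the ring continues from the ring's start.  This is a simplified
 * model; the helper name is invented for illustration only.
 */
#include <stddef.h>
#include <string.h>

static void rx_ring_copy(unsigned char *dst, const unsigned char *ring,
			 size_t ring_size, size_t offset, size_t count)
{
	if (offset + count > ring_size) {
		size_t first = ring_size - offset;	/* bytes up to the wrap point */

		memcpy(dst, ring + offset, first);
		memcpy(dst + first, ring, count - first); /* remainder from the start */
	} else {
		memcpy(dst, ring + offset, count);	/* packet is in one chunk */
	}
}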
*/ - memcpy_fromio(skb->data, xfer_start, count); - } -} - -static void lne390_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8); - - count = (count + 3) & ~3; /* Round up to doubleword */ - memcpy_toio(shmem, buf, count); -} - - -#ifdef MODULE -#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */ -static struct net_device *dev_lne[MAX_LNE_CARDS]; -static int io[MAX_LNE_CARDS]; -static int irq[MAX_LNE_CARDS]; -static int mem[MAX_LNE_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, "memory base address(es)"); -MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); -MODULE_LICENSE("GPL"); - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { - if (io[this_dev] == 0 && this_dev != 0) - break; - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; - if (do_lne390_probe(dev) == 0) { - dev_lne[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr, LNE390_IO_EXTENT); - iounmap(ei_status.mem); -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { - struct net_device *dev = dev_lne[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ - diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index c0c127913dec..587a885de259 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -374,7 +374,6 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, NS8390_init(dev, 0); memcpy(dev->dev_addr, SA_prom, dev->addr_len); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); i = register_netdev(dev); if (i) diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c deleted file mode 100644 index ebcdb52ec739..000000000000 --- a/drivers/net/ethernet/8390/ne3210.c +++ /dev/null @@ -1,346 +0,0 @@ -/* - ne3210.c - - Linux driver for Novell NE3210 EISA Network Adapter - - Copyright (C) 1998, Paul Gortmaker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - Information and Code Sources: - - 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32) - 2) The existing myriad of other Linux 8390 drivers by Donald Becker. - 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file - - The NE3210 is an EISA shared memory NS8390 implementation. Shared - memory address > 1MB should work with this driver. - - Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched - around (or perhaps there are some defective/backwards cards ???) - - This driver WILL NOT WORK FOR THE NE3200 - it is completely different - and does not use an 8390 at all. - - Updated to EISA probing API 5/2003 by Marc Zyngier. 
-*/ - -#include <linux/module.h> -#include <linux/eisa.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/mm.h> - -#include <asm/io.h> - -#include "8390.h" - -#define DRV_NAME "ne3210" - -static void ne3210_reset_8390(struct net_device *dev); - -static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); -static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); -static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); - -#define NE3210_START_PG 0x00 /* First page of TX buffer */ -#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#define NE3210_IO_EXTENT 0x20 -#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */ -#define NE3210_RESET_PORT 0xc84 -#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ - -#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */ -#define NE3210_ADDR1 0x00 -#define NE3210_ADDR2 0x1b - -#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ -#define NE3210_CFG2 0xc90 -#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1) - -/* - * You can OR any of the following bits together and assign it - * to NE3210_DEBUG to get verbose driver info during operation. - * Currently only the probe one is implemented. - */ - -#define NE3210_D_PROBE 0x01 -#define NE3210_D_RX_PKT 0x02 -#define NE3210_D_TX_PKT 0x04 -#define NE3210_D_IRQ 0x08 - -#define NE3210_DEBUG 0x0 - -static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; -static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; -static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"}; -static int ifmap_val[] __initdata = { - IF_PORT_10BASET, - IF_PORT_UNKNOWN, - IF_PORT_10BASE2, - IF_PORT_AUI, -}; - -static int __init ne3210_eisa_probe (struct device *device) -{ - unsigned long ioaddr, phys_mem; - int i, retval, port_index; - struct eisa_device *edev = to_eisa_device (device); - struct net_device *dev; - - /* Allocate dev->priv and fill in 8390 specific dev fields. */ - if (!(dev = alloc_ei_netdev ())) { - printk ("ne3210.c: unable to allocate memory for dev!\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(dev, device); - dev_set_drvdata(device, dev); - ioaddr = edev->base_addr; - - if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) { - retval = -EBUSY; - goto out; - } - - if (!request_region(ioaddr + NE3210_CFG1, - NE3210_CFG_EXTENT, DRV_NAME)) { - retval = -EBUSY; - goto out1; - } - -#if NE3210_DEBUG & NE3210_D_PROBE - printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig); - printk("ne3210-debug: config regs: %#x %#x\n", - inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); -#endif - - port_index = inb(ioaddr + NE3210_CFG2) >> 6; - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); - printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", - edev->slot, ifmap[port_index], dev->dev_addr); - - /* Snarf the interrupt now. 
CFG file has them all listed as `edge' with share=NO */ - dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; - printk("ne3210.c: using IRQ %d, ", dev->irq); - - retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk (" unable to get IRQ %d.\n", dev->irq); - goto out2; - } - - phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000; - - /* - BEWARE!! Some dain-bramaged EISA SCUs will allow you to put - the card mem within the region covered by `normal' RAM !!! - */ - if (phys_mem > 1024*1024) { /* phys addr > 1MB */ - if (phys_mem < virt_to_phys(high_memory)) { - printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); - printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n"); - printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n", - (u64)virt_to_phys(high_memory)); - printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); - retval = -EINVAL; - goto out3; - } - } - - if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) { - printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n", - phys_mem); - goto out3; - } - - printk("%dkB memory at physical address %#lx\n", - NE3210_STOP_PG/4, phys_mem); - - ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100); - if (!ei_status.mem) { - printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n"); - printk(KERN_ERR "ne3210.c: Driver NOT installed.\n"); - retval = -EAGAIN; - goto out4; - } - printk("ne3210.c: remapped %dkB card memory to virtual address %p\n", - NE3210_STOP_PG/4, ei_status.mem); - dev->mem_start = (unsigned long)ei_status.mem; - dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256; - - /* The 8390 offset is zero for the NE3210 */ - dev->base_addr = ioaddr; - - ei_status.name = "NE3210"; - ei_status.tx_start_page = NE3210_START_PG; - ei_status.rx_start_page = NE3210_START_PG + TX_PAGES; - ei_status.stop_page = NE3210_STOP_PG; - ei_status.word16 = 1; - ei_status.priv = phys_mem; - - if (ei_debug > 0) - printk("ne3210 loaded.\n"); - - ei_status.reset_8390 = &ne3210_reset_8390; - ei_status.block_input = &ne3210_block_input; - ei_status.block_output = &ne3210_block_output; - ei_status.get_8390_hdr = &ne3210_get_8390_hdr; - - dev->netdev_ops = &ei_netdev_ops; - - dev->if_port = ifmap_val[port_index]; - - if ((retval = register_netdev (dev))) - goto out5; - - NS8390_init(dev, 0); - return 0; - - out5: - iounmap(ei_status.mem); - out4: - release_mem_region (phys_mem, NE3210_STOP_PG*0x100); - out3: - free_irq (dev->irq, dev); - out2: - release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); - out1: - release_region (ioaddr, NE3210_IO_EXTENT); - out: - free_netdev (dev); - - return retval; -} - -static int ne3210_eisa_remove(struct device *device) -{ - struct net_device *dev = dev_get_drvdata(device); - unsigned long ioaddr = to_eisa_device (device)->base_addr; - - unregister_netdev (dev); - iounmap(ei_status.mem); - release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100); - free_irq (dev->irq, dev); - release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); - release_region (ioaddr, NE3210_IO_EXTENT); - free_netdev (dev); - - return 0; -} - -/* - * Reset by toggling the "Board Enable" bits (bit 2 and 0). 
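/* [Editorial aside -- not part of the original driver or of this diff.]
 * The memory-overlap rule applied in ne3210_eisa_probe() above, as a
 * stand-alone predicate: a shared-memory window below 1MB is always
 * acceptable, while one above 1MB must also lie above the top of system RAM
 * (virt_to_phys(high_memory) in the driver).  Name invented for illustration.
 */
#include <stdbool.h>

static bool ne3210_shmem_safe(unsigned long long phys_mem,
			      unsigned long long ram_top)
{
	return phys_mem <= 1024 * 1024 || phys_mem >= ram_top;
}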
- */ - -static void ne3210_reset_8390(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - - outb(0x04, ioaddr + NE3210_RESET_PORT); - if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name); - - mdelay(2); - - ei_status.txing = 0; - outb(0x01, ioaddr + NE3210_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* - * Note: In the following three functions is the implicit assumption - * that the associated memcpy will only use "rep; movsl" as long as - * we keep the counts as some multiple of doublewords. This is a - * requirement of the hardware, and also prevents us from using - * eth_io_copy_and_sum() since we can't guarantee it will limit - * itself to doubleword access. - */ - -/* - * Grab the 8390 specific header. Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. (A single doubleword.) - */ - -static void -ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ -} - -/* - * Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. The count will already - * be rounded up to a doubleword value via ne3210_get_8390_hdr() above. - */ - -static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256; - - if (ring_offset + count > NE3210_STOP_PG*256) { - /* Packet wraps over end of ring buffer. */ - int semi_count = NE3210_STOP_PG*256 - ring_offset; - memcpy_fromio(skb->data, start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, - ei_status.mem + TX_PAGES*256, count); - } else { - /* Packet is in one chunk. */ - memcpy_fromio(skb->data, start, count); - } -} - -static void ne3210_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8); - - count = (count + 3) & ~3; /* Round up to doubleword */ - memcpy_toio(shmem, buf, count); -} - -static struct eisa_device_id ne3210_ids[] = { - { "EGL0101" }, - { "NVL1801" }, - { "" }, -}; -MODULE_DEVICE_TABLE(eisa, ne3210_ids); - -static struct eisa_driver ne3210_eisa_driver = { - .id_table = ne3210_ids, - .driver = { - .name = "ne3210", - .probe = ne3210_eisa_probe, - .remove = ne3210_eisa_remove, - }, -}; - -MODULE_DESCRIPTION("NE3210 EISA Ethernet driver"); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(eisa, ne3210_ids); - -static int ne3210_init(void) -{ - return eisa_driver_register (&ne3210_eisa_driver); -} - -static void ne3210_cleanup(void) -{ - eisa_driver_unregister (&ne3210_eisa_driver); -} - -module_init (ne3210_init); -module_exit (ne3210_cleanup); diff --git a/drivers/net/ethernet/8390/smc-ultra32.c b/drivers/net/ethernet/8390/smc-ultra32.c deleted file mode 100644 index 923e42aedcfd..000000000000 --- a/drivers/net/ethernet/8390/smc-ultra32.c +++ /dev/null @@ -1,463 +0,0 @@ -/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux. - -Sources: - - This driver is based on (cloned from) the ISA SMC Ultra driver - written by Donald Becker. Modifications to support the EISA - version of the card by Paul Gortmaker and Leonard N. 
Zubkoff. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - -Theory of Operation: - - The SMC Ultra32C card uses the SMC 83c790 chip which is also - found on the ISA SMC Ultra cards. It has a shared memory mode of - operation that makes it similar to the ISA version of the card. - The main difference is that the EISA card has 32KB of RAM, but - only an 8KB window into that memory. The EISA card also can be - set for a bus-mastering mode of operation via the ECU, but that - is not (and probably will never be) supported by this driver. - The ECU should be run to enable shared memory and to disable the - bus-mastering feature for use with linux. - - By programming the 8390 to use only 8KB RAM, the modifications - to the ISA driver can be limited to the probe and initialization - code. This allows easy integration of EISA support into the ISA - driver. However, the driver development kit from SMC provided the - register information for sliding the 8KB window, and hence the 8390 - is programmed to use the full 32KB RAM. - - Unfortunately this required code changes outside the probe/init - routines, and thus we decided to separate the EISA driver from - the ISA one. In this way, ISA users don't end up with a larger - driver due to the EISA code, and EISA users don't end up with a - larger driver due to the ISA EtherEZ PIO code. The driver is - similar to the 3c503/16 driver, in that the window must be set - back to the 1st 8KB of space for access to the two 8390 Tx slots. - - In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to - be a limiting factor, since the EISA bus could get packets off - the card fast enough, but having the use of lots of RAM as Rx - space is extra insurance if interrupt latencies become excessive. - -*/ - -static const char *version = "smc-ultra32.c: 06/97 v1.00\n"; - - -#include <linux/module.h> -#include <linux/eisa.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> - -#include <asm/io.h> - -#include "8390.h" - -#define DRV_NAME "smc-ultra32" - -static int ultra32_probe1(struct net_device *dev, int ioaddr); -static int ultra32_open(struct net_device *dev); -static void ultra32_reset_8390(struct net_device *dev); -static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ultra32_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ultra32_block_output(struct net_device *dev, int count, - const unsigned char *buf, - const int start_page); -static int ultra32_close(struct net_device *dev); - -#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */ -#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */ -#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */ -#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ -#define ULTRA32_IO_EXTENT 32 -#define EN0_ERWCNT 0x08 /* Early receive warning count. */ - -/* - * Defines that apply only to the Ultra32 EISA card. Note that - * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates - * into an EISA ID of 0x1080A34D - */ -#define ULTRA32_BASE 0xca0 -#define ULTRA32_ID 0x1080a34d -#define ULTRA32_IDPORT (-0x20) /* 0xc80 */ -/* Config regs 1->7 from the EISA !SMC8010.CFG file. 
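/* [Editorial aside -- not part of the original driver or of this diff.]
 * The window arithmetic used by the Ultra32 block I/O routines below: the
 * card has 32KB of RAM but only an 8KB host window, so a ring offset splits
 * into a 2-bit bank number (OR-ed into the CFG3 RAM register) and a 13-bit
 * offset inside the currently mapped window.  Helper names are invented for
 * illustration only.
 */
static inline unsigned int ultra32_bank(unsigned int ring_offset)
{
	return (ring_offset & 0x6000) >> 13;	/* which 8KB bank (0-3) */
}

static inline unsigned int ultra32_bank_offset(unsigned int ring_offset)
{
	return ring_offset & 0x1fff;		/* byte within that bank */
}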
*/ -#define ULTRA32_CFG1 0x04 /* 0xca4 */ -#define ULTRA32_CFG2 0x05 /* 0xca5 */ -#define ULTRA32_CFG3 (-0x18) /* 0xc88 */ -#define ULTRA32_CFG4 (-0x17) /* 0xc89 */ -#define ULTRA32_CFG5 (-0x16) /* 0xc8a */ -#define ULTRA32_CFG6 (-0x15) /* 0xc8b */ -#define ULTRA32_CFG7 0x0d /* 0xcad */ - -static void cleanup_card(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; - /* NB: ultra32_close_card() does free_irq */ - release_region(ioaddr, ULTRA32_IO_EXTENT); - iounmap(ei_status.mem); -} - -/* Probe for the Ultra32. This looks like a 8013 with the station - address PROM at I/O ports <base>+8 to <base>+13, with a checksum - following. -*/ - -struct net_device * __init ultra32_probe(int unit) -{ - struct net_device *dev; - int base; - int irq; - int err = -ENODEV; - - if (!EISA_bus) - return ERR_PTR(-ENODEV); - - dev = alloc_ei_netdev(); - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - irq = dev->irq; - - /* EISA spec allows for up to 16 slots, but 8 is typical. */ - for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) { - if (ultra32_probe1(dev, base) == 0) - break; - dev->irq = irq; - } - if (base >= 0x9000) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - cleanup_card(dev); -out: - free_netdev(dev); - return ERR_PTR(err); -} - - -static const struct net_device_ops ultra32_netdev_ops = { - .ndo_open = ultra32_open, - .ndo_stop = ultra32_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_rx_mode = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init ultra32_probe1(struct net_device *dev, int ioaddr) -{ - int i, edge, media, retval; - int checksum = 0; - const char *model_name; - static unsigned version_printed; - /* Values from various config regs. */ - unsigned char idreg; - unsigned char reg4; - const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"}; - - if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - if (inb(ioaddr + ULTRA32_IDPORT) == 0xff || - inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) { - retval = -ENODEV; - goto out; - } - - media = inb(ioaddr + ULTRA32_CFG7) & 0x03; - edge = inb(ioaddr + ULTRA32_CFG5) & 0x08; - printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n", - ioaddr >> 12, ifmap[media], - (edge ? "Edge Triggered" : "Level Sensitive")); - - idreg = inb(ioaddr + 7); - reg4 = inb(ioaddr + 4) & 0x7f; - - /* Check the ID nibble. */ - if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */ - retval = -ENODEV; - goto out; - } - - /* Select the station address register set. */ - outb(reg4, ioaddr + 4); - - for (i = 0; i < 8; i++) - checksum += inb(ioaddr + 8 + i); - if ((checksum & 0xff) != 0xff) { - retval = -ENODEV; - goto out; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - model_name = "SMC Ultra32"; - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); - - printk("%s: %s at 0x%X, %pM", - dev->name, model_name, ioaddr, dev->dev_addr); - - /* Switch from the station address to the alternate register set and - read the useful registers there. */ - outb(0x80 | reg4, ioaddr + 4); - - /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. 
*/ - outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); - - /* Reset RAM addr. */ - outb(0x00, ioaddr + 0x0b); - - /* Switch back to the station address register set so that the - MS-DOS driver can find the card after a warm boot. */ - outb(reg4, ioaddr + 4); - - if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) { - printk("\nsmc-ultra32: Card RAM is disabled! " - "Run EISA config utility.\n"); - retval = -ENODEV; - goto out; - } - if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0) - printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. " - "Run EISA config utility.\n"); - - if (dev->irq < 2) { - unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15}; - int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07]; - if (irq == 0) { - printk(", failed to detect IRQ line.\n"); - retval = -EAGAIN; - goto out; - } - dev->irq = irq; - } - - /* The 8390 isn't at the base address, so fake the offset */ - dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET; - - /* Save RAM address in the unused reg0 to avoid excess inb's. */ - ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc; - - dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11); - - ei_status.name = model_name; - ei_status.word16 = 1; - ei_status.tx_start_page = 0; - ei_status.rx_start_page = TX_PAGES; - /* All Ultra32 cards have 32KB memory with an 8KB window. */ - ei_status.stop_page = 128; - - ei_status.mem = ioremap(dev->mem_start, 0x2000); - if (!ei_status.mem) { - printk(", failed to ioremap.\n"); - retval = -ENOMEM; - goto out; - } - dev->mem_end = dev->mem_start + 0x1fff; - - printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n", - dev->irq, dev->mem_start, dev->mem_end); - ei_status.block_input = &ultra32_block_input; - ei_status.block_output = &ultra32_block_output; - ei_status.get_8390_hdr = &ultra32_get_8390_hdr; - ei_status.reset_8390 = &ultra32_reset_8390; - - dev->netdev_ops = &ultra32_netdev_ops; - NS8390_init(dev, 0); - - return 0; -out: - release_region(ioaddr, ULTRA32_IO_EXTENT); - return retval; -} - -static int ultra32_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */ - int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : IRQF_SHARED; - int retval; - - retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev); - if (retval) - return retval; - - outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */ - outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */ - outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */ - outb(0x01, ioaddr + 6); /* Enable Interrupts. */ - /* Set the early receive warning level in window 0 high enough not - to receive ERW interrupts. */ - outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr); - outb(0xff, dev->base_addr + EN0_ERWCNT); - ei_open(dev); - return 0; -} - -static int ultra32_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */ - - netif_stop_queue(dev); - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - - outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */ - outb(0x00, ioaddr + 6); /* Disable interrupts. */ - free_irq(dev->irq, dev); - - NS8390_init(dev, 0); - - return 0; -} - -static void ultra32_reset_8390(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */ - - outb(ULTRA32_RESET, ioaddr); - if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies); - ei_status.txing = 0; - - outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. 
*/ - outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */ - outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */ - outb(0x01, ioaddr + 6); /* Enable Interrupts. */ - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void ultra32_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, - int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8); - unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; - - /* Select correct 8KB Window. */ - outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg); - -#ifdef __BIG_ENDIAN - /* Officially this is what we are doing, but the readl() is faster */ - /* unfortunately it isn't endian aware of the struct */ - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = le16_to_cpu(hdr->count); -#else - ((unsigned int*)hdr)[0] = readl(hdr_start); -#endif -} - -/* Block input and output are easy on shared memory ethercards, the only - complication is when the ring buffer wraps, or in this case, when a - packet spans an 8KB boundary. Note that the current 8KB segment is - already set by the get_8390_hdr routine. */ - -static void ultra32_block_input(struct net_device *dev, - int count, - struct sk_buff *skb, - int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff); - unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; - - if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) { - int semi_count = 8192 - (ring_offset & 0x1FFF); - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - if (ring_offset < 96*256) { - /* Select next 8KB Window. */ - ring_offset += semi_count; - outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg); - memcpy_fromio(skb->data + semi_count, ei_status.mem, count); - } else { - /* Select first 8KB Window. */ - outb(ei_status.reg0, RamReg); - memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); - } - } else { - memcpy_fromio(skb->data, xfer_start, count); - } -} - -static void ultra32_block_output(struct net_device *dev, - int count, - const unsigned char *buf, - int start_page) -{ - void __iomem *xfer_start = ei_status.mem + (start_page<<8); - unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; - - /* Select first 8KB Window. 
*/ - outb(ei_status.reg0, RamReg); - - memcpy_toio(xfer_start, buf, count); -} - -#ifdef MODULE -#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */ -static struct net_device *dev_ultra[MAX_ULTRA32_CARDS]; - -MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver"); -MODULE_LICENSE("GPL"); - -int __init init_module(void) -{ - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) { - struct net_device *dev = ultra32_probe(-1); - if (IS_ERR(dev)) - break; - dev_ultra[found++] = dev; - } - if (found) - return 0; - printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n"); - return -ENXIO; -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) { - struct net_device *dev = dev_ultra[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ - diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index e4ff38949112..ed956e08d38b 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -135,7 +135,6 @@ config ETHOC source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" -source "drivers/net/ethernet/racal/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index d4473072654a..8268d85f9448 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -53,7 +53,6 @@ obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ -obj-$(CONFIG_NET_VENDOR_RACAL) += racal/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_SH_ETH) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index c1fdb8be8bee..a175d0be1ae1 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -425,8 +425,8 @@ static int mii_probe(struct net_device *dev, int phy_mode) return -EINVAL; } - phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link, - 0, phy_mode); + phydev = phy_connect(dev, dev_name(&phydev->dev), + &bfin_mac_adjust_link, phy_mode); if (IS_ERR(phydev)) { netdev_err(dev, "could not attach PHY\n"); @@ -498,10 +498,10 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, KBUILD_MODNAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, dev_name(&dev->dev)); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info)); } static void bfin_mac_ethtool_getwol(struct net_device *dev, @@ -647,7 +647,6 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - dev->addr_assign_type &= ~NET_ADDR_RANDOM; setup_mac_addr(dev->dev_addr); return 0; } diff --git a/drivers/net/ethernet/aeroflex/greth.c 
b/drivers/net/ethernet/aeroflex/greth.c index aa53115bb38b..0be2195e5034 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1127,10 +1127,11 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in { struct greth_private *greth = netdev_priv(dev); - strncpy(info->driver, dev_driver_string(greth->dev), 32); - strncpy(info->version, "revision: 1.0", 32); - strncpy(info->bus_info, greth->dev->bus->name, 32); - strncpy(info->fw_version, "N/A", 32); + strlcpy(info->driver, dev_driver_string(greth->dev), + sizeof(info->driver)); + strlcpy(info->version, "revision: 1.0", sizeof(info->version)); + strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); info->eedump_len = 0; info->regdump_len = sizeof(struct greth_regs); } @@ -1287,9 +1288,7 @@ static int greth_mdio_probe(struct net_device *dev) } ret = phy_connect_direct(dev, phy, &greth_link_change, - 0, greth->gbit_mac ? - PHY_INTERFACE_MODE_GMII : - PHY_INTERFACE_MODE_MII); + greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII); if (ret) { if (netif_msg_ifup(greth)) dev_err(&dev->dev, "could not attach to PHY\n"); diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 8350f4b37a8a..13d74aa4033d 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -7,7 +7,7 @@ config NET_VENDOR_AMD default y depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \ SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \ - (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA + (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA ---help--- If you have a network (Ethernet) chipset belonging to this class, say Y. @@ -105,19 +105,6 @@ config DECLANCE DEC (now Compaq) based on the AMD LANCE chipset, including the DEPCA series. (This chipset is better known via the NE2100 cards.) -config DEPCA - tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support" - depends on (ISA || EISA || MCA) - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto> as well as - <file:drivers/net/ethernet/amd/depca.c>. - - To compile this driver as a module, choose M here. The module - will be called depca. - config HPLANCE bool "HP on-board LANCE support" depends on DIO diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile index 175caa5328c9..cdd4301a973d 100644 --- a/drivers/net/ethernet/amd/Makefile +++ b/drivers/net/ethernet/amd/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o obj-$(CONFIG_ARIADNE) += ariadne.o obj-$(CONFIG_ATARILANCE) += atarilance.o obj-$(CONFIG_DECLANCE) += declance.o -obj-$(CONFIG_DEPCA) += depca.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_LANCE) += lance.o obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 2ea221ed4777..de774d419144 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -437,8 +437,8 @@ static int au1000_mii_probe(struct net_device *dev) /* now we are supposed to have a proper phydev, to attach to... 
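The bfin_mac and greth hunks above replace strcpy()/strncpy() in the ethtool get_drvinfo callbacks with strlcpy() bounded by sizeof the destination field, which guarantees NUL termination and cannot overrun the fixed-size ethtool_drvinfo strings. A stand-alone model of why the bounded copy matters; my_strlcpy and the tiny struct are illustrative, not kernel code:

#include <stdio.h>
#include <string.h>

struct drvinfo { char driver[8]; char version[8]; };  /* tiny stand-in */

/* Minimal strlcpy-style helper: always NUL-terminates, never overruns. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);
	if (size) {
		size_t n = (len >= size) ? size - 1 : len;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;            /* callers can detect truncation from this */
}

int main(void)
{
	struct drvinfo info;
	my_strlcpy(info.driver, "a-very-long-driver-name", sizeof(info.driver));
	my_strlcpy(info.version, "1.0", sizeof(info.version));
	printf("driver=\"%s\" version=\"%s\"\n", info.driver, info.version);
	return 0;
}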
*/ BUG_ON(phydev->attached_dev); - phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, - 0, PHY_INTERFACE_MODE_MII); + phydev = phy_connect(dev, dev_name(&phydev->dev), + &au1000_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { netdev_err(dev, "Could not attach to PHY\n"); @@ -587,10 +587,10 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct au1000_private *aup = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - info->fw_version[0] = '\0'; - sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME, + aup->mac_id); info->regdump_len = 0; } diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c deleted file mode 100644 index 34a485363d5b..000000000000 --- a/drivers/net/ethernet/amd/depca.c +++ /dev/null @@ -1,1910 +0,0 @@ -/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux. - - Written 1994, 1995 by David C. Davies. - - - Copyright 1994 David C. Davies - and - United States Government - (as represented by the Director, National Security Agency). - - Copyright 1995 Digital Equipment Corporation. - - - This software may be used and distributed according to the terms of - the GNU General Public License, incorporated herein by reference. - - This driver is written for the Digital Equipment Corporation series - of DEPCA and EtherWORKS ethernet cards: - - DEPCA (the original) - DE100 - DE101 - DE200 Turbo - DE201 Turbo - DE202 Turbo (TP BNC) - DE210 - DE422 (EISA) - - The driver has been tested on DE100, DE200 and DE202 cards in a - relatively busy network. The DE422 has been tested a little. - - This driver will NOT work for the DE203, DE204 and DE205 series of - cards, since they have a new custom ASIC in place of the AMD LANCE - chip. See the 'ewrk3.c' driver in the Linux source tree for running - those cards. - - I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from) - a DECstation 5000/200. - - The author may be reached at davies@maniac.ultranet.com - - ========================================================================= - - The driver was originally based on the 'lance.c' driver from Donald - Becker which is included with the standard driver distribution for - linux. V0.4 is a complete re-write with only the kernel interface - remaining from the original code. - - 1) Lance.c code in /linux/drivers/net/ - 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook", - AMD, 1992 [(800) 222-9323]. - 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)", - AMD, Pub. #17881, May 1993. - 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA", - AMD, Pub. #16907, May 1992 - 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual", - Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003 - 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual", - Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003 - 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR - Digital Equipment Corporation, 1989 - 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual", - Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001 - - - Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this - driver. 
- - The original DEPCA card requires that the ethernet ROM address counter - be enabled to count and has an 8 bit NICSR. The ROM counter enabling is - only done when a 0x08 is read as the first address octet (to minimise - the chances of writing over some other hardware's I/O register). The - NICSR accesses have been changed to byte accesses for all the cards - supported by this driver, since there is only one useful bit in the MSB - (remote boot timeout) and it is not used. Also, there is a maximum of - only 48kB network RAM for this card. My thanks to Torbjorn Lindh for - help debugging all this (and holding my feet to the fire until I got it - right). - - The DE200 series boards have on-board 64kB RAM for use as a shared - memory network buffer. Only the DE100 cards make use of a 2kB buffer - mode which has not been implemented in this driver (only the 32kB and - 64kB modes are supported [16kB/48kB for the original DEPCA]). - - At the most only 2 DEPCA cards can be supported on the ISA bus because - there is only provision for two I/O base addresses on each card (0x300 - and 0x200). The I/O address is detected by searching for a byte sequence - in the Ethernet station address PROM at the expected I/O address for the - Ethernet PROM. The shared memory base address is 'autoprobed' by - looking for the self test PROM and detecting the card name. When a - second DEPCA is detected, information is placed in the base_addr - variable of the next device structure (which is created if necessary), - thus enabling ethif_probe initialization for the device. More than 2 - EISA cards can be supported, but care will be needed assigning the - shared memory to ensure that each slot has the correct IRQ, I/O address - and shared memory address assigned. - - ************************************************************************ - - NOTE: If you are using two ISA DEPCAs, it is important that you assign - the base memory addresses correctly. The driver autoprobes I/O 0x300 - then 0x200. The base memory address for the first device must be less - than that of the second so that the auto probe will correctly assign the - I/O and memory addresses on the same card. I can't think of a way to do - this unambiguously at the moment, since there is nothing on the cards to - tie I/O and memory information together. - - I am unable to test 2 cards together for now, so this code is - unchecked. All reports, good or bad, are welcome. - - ************************************************************************ - - The board IRQ setting must be at an unused IRQ which is auto-probed - using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are - {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is - really IRQ9 in machines with 16 IRQ lines. - - No 16MB memory limitation should exist with this driver as DMA is not - used and the common memory area is in low memory on the network card (my - current system has 20MB and I've not had problems yet). - - The ability to load this driver as a loadable module has been added. To - utilise this ability, you have to do <8 things: - - 0) have a copy of the loadable modules code installed on your system. - 1) copy depca.c from the /linux/drivers/net directory to your favourite - temporary directory. - 2) if you wish, edit the source code near line 1530 to reflect the I/O - address and IRQ you're using (see also 5). - 3) compile depca.c, but include -DMODULE in the command line to ensure - that the correct bits are compiled (see end of source code). 
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a - kernel with the depca configuration turned off and reboot. - 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100] - [Alan Cox: Changed the code to allow command line irq/io assignments] - [Dave Davies: Changed the code to allow command line mem/name - assignments] - 6) run the net startup bits for your eth?? interface manually - (usually /etc/rc.inet[12] at boot time). - 7) enjoy! - - Note that autoprobing is not allowed in loadable modules - the system is - already up and running and you're messing with interrupts. - - To unload a module, turn off the associated interface - 'ifconfig eth?? down' then 'rmmod depca'. - - To assign a base memory address for the shared memory when running as a - loadable module, see 5 above. To include the adapter name (if you have - no PROM but know the card name) also see 5 above. Note that this last - option will not work with kernel built-in depca's. - - The shared memory assignment for a loadable module makes sense to avoid - the 'memory autoprobe' picking the wrong shared memory (for the case of - 2 depca's in a PC). - - ************************************************************************ - Support for MCA EtherWORKS cards added 11-3-98. (MCA since deleted) - Verified to work with up to 2 DE212 cards in a system (although not - fully stress-tested). - - Revision History - ---------------- - - Version Date Description - - 0.1 25-jan-94 Initial writing. - 0.2 27-jan-94 Added LANCE TX hardware buffer chaining. - 0.3 1-feb-94 Added multiple DEPCA support. - 0.31 4-feb-94 Added DE202 recognition. - 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support. - 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable. - Add jabber packet fix from murf@perftech.com - and becker@super.org - 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access. - 0.35 8-mar-94 Added DE201 recognition. Tidied up. - 0.351 30-apr-94 Added EISA support. Added DE422 recognition. - 0.36 16-may-94 DE422 fix released. - 0.37 22-jul-94 Added MODULE support - 0.38 15-aug-94 Added DBR ROM switch in depca_close(). - Multi DEPCA bug fix. - 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0. - 0.381 12-dec-94 Added DE101 recognition, fix multicast bug. - 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>. - 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by - <stromain@alf.dec.com> - 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk> - 0.385 3-apr-95 Fix a recognition bug reported by - <ryan.niemi@lastfrontier.com> - 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility - 0.40 25-May-95 Rewrite for portability & updated. - ALPHA support from <jestabro@amt.tay1.dec.com> - 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from - suggestion by <heiko@colossus.escape.de> - 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable - modules. - Add 'adapter_name' for loadable modules when no PROM. - Both above from a suggestion by - <pchen@woodruffs121.residence.gatech.edu>. - Add new multicasting code. 
- 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi> - 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi> - 0.423 7-Jun-96 Fix module load bug <kmg@barco.be> - 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c - 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug - reported by <mmogilvi@elbert.uccs.edu> - 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards - by <tymm@computer.org> - 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org> - 0.5 14-Nov-98 Re-spin for 2.1.x kernels. - 0.51 27-Jun-99 Correct received packet length for CRC from - report by <worm@dkik.dk> - 0.52 16-Oct-00 Fixes for 2.3 io memory accesses - Fix show-stopper (ints left masked) in depca_interrupt - by <peterd@pnd-pc.demon.co.uk> - 0.53 12-Jan-01 Release resources on failure, bss tidbits - by acme@conectiva.com.br - 0.54 08-Nov-01 use library crc32 functions - by Matt_Domsch@dell.com - 0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org> - - ========================================================================= -*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/crc32.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/time.h> -#include <linux/types.h> -#include <linux/unistd.h> -#include <linux/ctype.h> -#include <linux/moduleparam.h> -#include <linux/platform_device.h> -#include <linux/bitops.h> - -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/dma.h> - -#ifdef CONFIG_EISA -#include <linux/eisa.h> -#endif - -#include "depca.h" - -static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n"; - -#ifdef DEPCA_DEBUG -static int depca_debug = DEPCA_DEBUG; -#else -static int depca_debug = 1; -#endif - -#define DEPCA_NDA 0xffe0 /* No Device Address */ - -#define TX_TIMEOUT (1*HZ) - -/* -** Ethernet PROM defines -*/ -#define PROBE_LENGTH 32 -#define ETH_PROM_SIG 0xAA5500FFUL - -/* -** Set the number of Tx and Rx buffers. Ensure that the memory requested -** here is <= to the amount of shared memory set up by the board switches. -** The number of descriptors MUST BE A POWER OF 2. 
-** -** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ) -*/ -#define NUM_RX_DESC 8 /* Number of RX descriptors */ -#define NUM_TX_DESC 8 /* Number of TX descriptors */ -#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */ -#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */ - -/* -** EISA bus defines -*/ -#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ - -/* -** ISA Bus defines -*/ -#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000} -#define DEPCA_TOTAL_SIZE 0x10 - -static struct { - u_long iobase; - struct platform_device *device; -} depca_io_ports[] = { - { 0x300, NULL }, - { 0x200, NULL }, - { 0 , NULL }, -}; - -/* -** Name <-> Adapter mapping -*/ -#define DEPCA_SIGNATURE {"DEPCA",\ - "DE100","DE101",\ - "DE200","DE201","DE202",\ - "DE210","DE212",\ - "DE422",\ - ""} - -static char* __initdata depca_signature[] = DEPCA_SIGNATURE; - -enum depca_type { - DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown -}; - -static char depca_string[] = "depca"; - -static int depca_device_remove (struct device *device); - -#ifdef CONFIG_EISA -static struct eisa_device_id depca_eisa_ids[] = { - { "DEC4220", de422 }, - { "" } -}; -MODULE_DEVICE_TABLE(eisa, depca_eisa_ids); - -static int depca_eisa_probe (struct device *device); - -static struct eisa_driver depca_eisa_driver = { - .id_table = depca_eisa_ids, - .driver = { - .name = depca_string, - .probe = depca_eisa_probe, - .remove = depca_device_remove - } -}; -#endif - -static int depca_isa_probe (struct platform_device *); - -static int depca_isa_remove(struct platform_device *pdev) -{ - return depca_device_remove(&pdev->dev); -} - -static struct platform_driver depca_isa_driver = { - .probe = depca_isa_probe, - .remove = depca_isa_remove, - .driver = { - .name = depca_string, - }, -}; - -/* -** Miscellaneous info... -*/ -#define DEPCA_STRLEN 16 - -/* -** Memory Alignment. Each descriptor is 4 longwords long. To force a -** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and -** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area -** and hence the RX descriptor ring's first entry. -*/ -#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ -#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */ -#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */ - -/* -** The DEPCA Rx and Tx ring descriptors. -*/ -struct depca_rx_desc { - volatile s32 base; - s16 buf_length; /* This length is negative 2's complement! */ - s16 msg_length; /* This length is "normal". */ -}; - -struct depca_tx_desc { - volatile s32 base; - s16 length; /* This length is negative 2's complement! */ - s16 misc; /* Errors and TDR info */ -}; - -#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM - to LANCE memory address space */ - -/* -** The Lance initialization block, described in databook, in common memory. -*/ -struct depca_init { - u16 mode; /* Mode register */ - u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */ - u8 mcast_table[8]; /* Multicast Hash Table. 
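The ring sizing comment above boils down to simple arithmetic (each descriptor costs 8 bytes plus its buffer, and the driver adds the small init block on top), and depca_hw_init() later rejects configurations whose total exceeds the network RAM on the board. A quick worked check of the default 8+8 ring configuration:

#include <stdio.h>

int main(void)
{
	const int num_rx = 8, num_tx = 8;          /* NUM_RX_DESC / NUM_TX_DESC */
	const int rx_buf = 1536, tx_buf = 1536;    /* RX_BUFF_SZ / TX_BUFF_SZ   */
	const int desc   = 8;                      /* bytes per ring descriptor */

	int total = num_rx * (desc + rx_buf) + num_tx * (desc + tx_buf);
	printf("ring memory: %d bytes (~%d kB)\n", total, total >> 10);
	/* ~24 kB, comfortably inside the 48 kB (DEPCA) or 64 kB (DE2xx) RAM. */
	return 0;
}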
*/ - u32 rx_ring; /* Rx ring base pointer & ring length */ - u32 tx_ring; /* Tx ring base pointer & ring length */ -}; - -#define DEPCA_PKT_STAT_SZ 16 -#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you - increase DEPCA_PKT_STAT_SZ */ -struct depca_private { - char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */ - enum depca_type adapter; /* Adapter type */ - enum { - DEPCA_BUS_ISA = 1, - DEPCA_BUS_EISA, - } depca_bus; /* type of bus */ - struct depca_init init_block; /* Shadow Initialization block */ -/* CPU address space fields */ - struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */ - struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */ - void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */ - void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */ - void __iomem *sh_mem; /* CPU mapped virt address of device RAM */ - u_long mem_start; /* Bus address of device RAM (before remap) */ - u_long mem_len; /* device memory size */ -/* Device address space fields */ - u_long device_ram_start; /* Start of RAM in device addr space */ -/* Offsets used in both address spaces */ - u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */ - u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */ - u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */ -/* Kernel-only (not device) fields */ - int rx_new, tx_new; /* The next free ring entry */ - int rx_old, tx_old; /* The ring entries to be free()ed. */ - spinlock_t lock; - struct { /* Private stats counters */ - u32 bins[DEPCA_PKT_STAT_SZ]; - u32 unicast; - u32 multicast; - u32 broadcast; - u32 excessive_collisions; - u32 tx_underruns; - u32 excessive_underruns; - } pktStats; - int txRingMask; /* TX ring mask */ - int rxRingMask; /* RX ring mask */ - s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */ - s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */ -}; - -/* -** The transmit ring full condition is described by the tx_old and tx_new -** pointers by: -** tx_old = tx_new Empty ring -** tx_old = tx_new+1 Full ring -** tx_old+txRingMask = tx_new Full ring (wrapped condition) -*/ -#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ - lp->tx_old+lp->txRingMask-lp->tx_new:\ - lp->tx_old -lp->tx_new-1) - -/* -** Public Functions -*/ -static int depca_open(struct net_device *dev); -static netdev_tx_t depca_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t depca_interrupt(int irq, void *dev_id); -static int depca_close(struct net_device *dev); -static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static void depca_tx_timeout(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); - -/* -** Private functions -*/ -static void depca_init_ring(struct net_device *dev); -static int depca_rx(struct net_device *dev); -static int depca_tx(struct net_device *dev); - -static void LoadCSRs(struct net_device *dev); -static int InitRestartDepca(struct net_device *dev); -static int DepcaSignature(char *name, u_long paddr); -static int DevicePresent(u_long ioaddr); -static int get_hw_addr(struct net_device *dev); -static void SetMulticastFilter(struct net_device *dev); -static int load_packet(struct net_device *dev, struct sk_buff *skb); -static void depca_dbg_open(struct net_device *dev); - -static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 }; -static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 }; -static u_char 
de422_irq[] __initdata = { 5, 9, 10, 11, 0 }; -static u_char *depca_irq; - -static int irq; -static int io; -static char *adapter_name; -static int mem; /* For loadable module assignment - use insmod mem=0x????? .... */ -module_param (irq, int, 0); -module_param (io, int, 0); -module_param (adapter_name, charp, 0); -module_param (mem, int, 0); -MODULE_PARM_DESC(irq, "DEPCA IRQ number"); -MODULE_PARM_DESC(io, "DEPCA I/O base address"); -MODULE_PARM_DESC(adapter_name, "DEPCA adapter name"); -MODULE_PARM_DESC(mem, "DEPCA shared memory address"); -MODULE_LICENSE("GPL"); - -/* -** Miscellaneous defines... -*/ -#define STOP_DEPCA \ - outw(CSR0, DEPCA_ADDR);\ - outw(STOP, DEPCA_DATA) - -static const struct net_device_ops depca_netdev_ops = { - .ndo_open = depca_open, - .ndo_start_xmit = depca_start_xmit, - .ndo_stop = depca_close, - .ndo_set_rx_mode = set_multicast_list, - .ndo_do_ioctl = depca_ioctl, - .ndo_tx_timeout = depca_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init depca_hw_init (struct net_device *dev, struct device *device) -{ - struct depca_private *lp; - int i, j, offset, netRAM, mem_len, status = 0; - s16 nicsr; - u_long ioaddr; - u_long mem_start; - - /* - * We are now supposed to enter this function with the - * following fields filled with proper values : - * - * dev->base_addr - * lp->mem_start - * lp->depca_bus - * lp->adapter - * - * dev->irq can be set if known from device configuration (on - * MCA or EISA) or module option. Otherwise, it will be auto - * detected. - */ - - ioaddr = dev->base_addr; - - STOP_DEPCA; - - nicsr = inb(DEPCA_NICSR); - nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM); - outb(nicsr, DEPCA_NICSR); - - if (inw(DEPCA_DATA) != STOP) { - return -ENXIO; - } - - lp = netdev_priv(dev); - mem_start = lp->mem_start; - - if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) - return -ENXIO; - - printk("%s: %s at 0x%04lx", - dev_name(device), depca_signature[lp->adapter], ioaddr); - - switch (lp->depca_bus) { -#ifdef CONFIG_EISA - case DEPCA_BUS_EISA: - printk(" (EISA slot %d)", to_eisa_device(device)->slot); - break; -#endif - - case DEPCA_BUS_ISA: - break; - - default: - printk("Unknown DEPCA bus %d\n", lp->depca_bus); - return -ENXIO; - } - - printk(", h/w address "); - status = get_hw_addr(dev); - printk("%pM", dev->dev_addr); - if (status != 0) { - printk(" which has an Ethernet PROM CRC error.\n"); - return -ENXIO; - } - - /* Set up the maximum amount of network RAM(kB) */ - netRAM = ((lp->adapter != DEPCA) ? 64 : 48); - if ((nicsr & _128KB) && (lp->adapter == de422)) - netRAM = 128; - - /* Shared Memory Base Address */ - if (nicsr & BUF) { - nicsr &= ~BS; /* DEPCA RAM in top 32k */ - netRAM -= 32; - mem_start += 0x8000; - } - - if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init))) - > (netRAM << 10)) { - printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM); - return -ENXIO; - } - - printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start); - - /* Enable the shadow RAM. 
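The loop above that shifts a log2 count into bits 29-31 is how the LANCE init block encodes ring length: the descriptor ring pointer and RLEN share one 32-bit word, which depca_init_ring() later ORs together. A small stand-alone check of that encoding; the example address is an arbitrary in-RAM offset, not taken from a real board:

#include <stdio.h>

/* OR log2(ring entries) into bits 29-31 of the ring-pointer word, the
 * same shape as depca_hw_init()/depca_init_ring(). LANCE addresses are
 * at most 24 bits wide, hence the mask. */
static unsigned int ring_word(unsigned int ram_addr, int entries)
{
	int log2len = 0;
	for (int j = entries - 1; j > 0; j >>= 1)
		log2len++;
	return (ram_addr & 0x00ffffff) | ((unsigned int)log2len << 29);
}

int main(void)
{
	printf("rx_ring word: 0x%08x\n", ring_word(0x0054, 8));  /* 0x60000054 */
	return 0;
}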
*/ - if (lp->adapter != DEPCA) { - nicsr |= SHE; - outb(nicsr, DEPCA_NICSR); - } - - spin_lock_init(&lp->lock); - sprintf(lp->adapter_name, "%s (%s)", - depca_signature[lp->adapter], dev_name(device)); - status = -EBUSY; - - /* Initialisation Block */ - if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) { - printk(KERN_ERR "depca: cannot request ISA memory, aborting\n"); - goto out_priv; - } - - status = -EIO; - lp->sh_mem = ioremap(mem_start, mem_len); - if (lp->sh_mem == NULL) { - printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n"); - goto out1; - } - - lp->mem_start = mem_start; - lp->mem_len = mem_len; - lp->device_ram_start = mem_start & LA_MASK; - - offset = 0; - offset += sizeof(struct depca_init); - - /* Tx & Rx descriptors (aligned to a quadword boundary) */ - offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN; - lp->rx_ring = lp->sh_mem + offset; - lp->rx_ring_offset = offset; - - offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC); - lp->tx_ring = lp->sh_mem + offset; - lp->tx_ring_offset = offset; - - offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC); - - lp->buffs_offset = offset; - - /* Finish initialising the ring information. */ - lp->rxRingMask = NUM_RX_DESC - 1; - lp->txRingMask = NUM_TX_DESC - 1; - - /* Calculate Tx/Rx RLEN size for the descriptors. */ - for (i = 0, j = lp->rxRingMask; j > 0; i++) { - j >>= 1; - } - lp->rx_rlen = (s32) (i << 29); - for (i = 0, j = lp->txRingMask; j > 0; i++) { - j >>= 1; - } - lp->tx_rlen = (s32) (i << 29); - - /* Load the initialisation block */ - depca_init_ring(dev); - - /* Initialise the control and status registers */ - LoadCSRs(dev); - - /* Enable DEPCA board interrupts for autoprobing */ - nicsr = ((nicsr & ~IM) | IEN); - outb(nicsr, DEPCA_NICSR); - - /* To auto-IRQ we enable the initialization-done and DMA err, - interrupts. For now we will always get a DMA error. */ - if (dev->irq < 2) { - unsigned char irqnum; - unsigned long irq_mask, delay; - - irq_mask = probe_irq_on(); - - /* Assign the correct irq list */ - switch (lp->adapter) { - case DEPCA: - case de100: - case de101: - depca_irq = de1xx_irq; - break; - case de200: - case de201: - case de202: - case de210: - case de212: - depca_irq = de2xx_irq; - break; - case de422: - depca_irq = de422_irq; - break; - - default: - break; /* Not reached */ - } - - /* Trigger an initialization just for the interrupt. */ - outw(INEA | INIT, DEPCA_DATA); - - delay = jiffies + HZ/50; - while (time_before(jiffies, delay)) - yield(); - - irqnum = probe_irq_off(irq_mask); - - status = -ENXIO; - if (!irqnum) { - printk(" and failed to detect IRQ line.\n"); - goto out2; - } else { - for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++) - if (irqnum == depca_irq[i]) { - dev->irq = irqnum; - printk(" and uses IRQ%d.\n", dev->irq); - } - - if (!dev->irq) { - printk(" but incorrect IRQ line detected.\n"); - goto out2; - } - } - } else { - printk(" and assigned IRQ%d.\n", dev->irq); - } - - if (depca_debug > 1) { - printk(version); - } - - /* The DEPCA-specific entries in the device structure. 
*/ - dev->netdev_ops = &depca_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - dev->mem_start = 0; - - dev_set_drvdata(device, dev); - SET_NETDEV_DEV (dev, device); - - status = register_netdev(dev); - if (status == 0) - return 0; -out2: - iounmap(lp->sh_mem); -out1: - release_mem_region (mem_start, mem_len); -out_priv: - return status; -} - - -static int depca_open(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - s16 nicsr; - int status = 0; - - STOP_DEPCA; - nicsr = inb(DEPCA_NICSR); - - /* Make sure the shadow RAM is enabled */ - if (lp->adapter != DEPCA) { - nicsr |= SHE; - outb(nicsr, DEPCA_NICSR); - } - - /* Re-initialize the DEPCA... */ - depca_init_ring(dev); - LoadCSRs(dev); - - depca_dbg_open(dev); - - if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) { - printk("depca_open(): Requested IRQ%d is busy\n", dev->irq); - status = -EAGAIN; - } else { - - /* Enable DEPCA board interrupts and turn off LED */ - nicsr = ((nicsr & ~IM & ~LED) | IEN); - outb(nicsr, DEPCA_NICSR); - outw(CSR0, DEPCA_ADDR); - - netif_start_queue(dev); - - status = InitRestartDepca(dev); - - if (depca_debug > 1) { - printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA)); - printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR)); - } - } - return status; -} - -/* Initialize the lance Rx and Tx descriptor rings. */ -static void depca_init_ring(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_int i; - u_long offset; - - /* Lock out other processes whilst setting up the hardware */ - netif_stop_queue(dev); - - lp->rx_new = lp->tx_new = 0; - lp->rx_old = lp->tx_old = 0; - - /* Initialize the base address and length of each buffer in the ring */ - for (i = 0; i <= lp->rxRingMask; i++) { - offset = lp->buffs_offset + i * RX_BUFF_SZ; - writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base); - writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length); - lp->rx_buff[i] = lp->sh_mem + offset; - } - - for (i = 0; i <= lp->txRingMask; i++) { - offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ; - writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base); - lp->tx_buff[i] = lp->sh_mem + offset; - } - - /* Set up the initialization block */ - lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen; - lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen; - - SetMulticastFilter(dev); - - for (i = 0; i < ETH_ALEN; i++) { - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - } - - lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */ -} - - -static void depca_tx_timeout(struct net_device *dev) -{ - u_long ioaddr = dev->base_addr; - - printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA)); - - STOP_DEPCA; - depca_init_ring(dev); - LoadCSRs(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); - InitRestartDepca(dev); -} - - -/* -** Writes a socket buffer to TX descriptor ring and starts transmission -*/ -static netdev_tx_t depca_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - int status = 0; - - /* Transmitter timeout, serious problems. */ - if (skb->len < 1) - goto out; - - if (skb_padto(skb, ETH_ZLEN)) - goto out; - - netif_stop_queue(dev); - - if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */ - status = load_packet(dev, skb); - - if (!status) { - /* Trigger an immediate send demand. 
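depca_start_xmit() below gates transmission on the TX_BUFFS_AVAIL macro removed further up; its two-branch expression is the usual circular-ring free-slot count with one slot held back so that tx_old == tx_new can unambiguously mean "empty". A tiny model of that arithmetic with the default 8-entry ring:

#include <stdio.h>

#define RING_MASK 7   /* 8 descriptors, same as the default txRingMask */

/* Free TX slots, mirroring the TX_BUFFS_AVAIL macro. */
static int tx_buffs_avail(int tx_old, int tx_new)
{
	return (tx_old <= tx_new) ? tx_old + RING_MASK - tx_new
	                          : tx_old - tx_new - 1;
}

int main(void)
{
	printf("empty      : %d\n", tx_buffs_avail(0, 0));  /* 7 free            */
	printf("one queued : %d\n", tx_buffs_avail(0, 1));  /* 6 free            */
	printf("full       : %d\n", tx_buffs_avail(1, 0));  /* tx_old==tx_new+1  */
	printf("wrap full  : %d\n", tx_buffs_avail(0, 7));  /* tx_old+mask==tx_new */
	return 0;
}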
*/ - outw(CSR0, DEPCA_ADDR); - outw(INEA | TDMD, DEPCA_DATA); - - dev_kfree_skb(skb); - } - if (TX_BUFFS_AVAIL) - netif_start_queue(dev); - } else - status = NETDEV_TX_LOCKED; - - out: - return status; -} - -/* -** The DEPCA interrupt handler. -*/ -static irqreturn_t depca_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct depca_private *lp; - s16 csr0, nicsr; - u_long ioaddr; - - if (dev == NULL) { - printk("depca_interrupt(): irq %d for unknown device.\n", irq); - return IRQ_NONE; - } - - lp = netdev_priv(dev); - ioaddr = dev->base_addr; - - spin_lock(&lp->lock); - - /* mask the DEPCA board interrupts and turn on the LED */ - nicsr = inb(DEPCA_NICSR); - nicsr |= (IM | LED); - outb(nicsr, DEPCA_NICSR); - - outw(CSR0, DEPCA_ADDR); - csr0 = inw(DEPCA_DATA); - - /* Acknowledge all of the current interrupt sources ASAP. */ - outw(csr0 & INTE, DEPCA_DATA); - - if (csr0 & RINT) /* Rx interrupt (packet arrived) */ - depca_rx(dev); - - if (csr0 & TINT) /* Tx interrupt (packet sent) */ - depca_tx(dev); - - /* Any resources available? */ - if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) { - netif_wake_queue(dev); - } - - /* Unmask the DEPCA board interrupts and turn off the LED */ - nicsr = (nicsr & ~IM & ~LED); - outb(nicsr, DEPCA_NICSR); - - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - -/* Called with lp->lock held */ -static int depca_rx(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - int i, entry; - s32 status; - - for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) { - status = readl(&lp->rx_ring[entry].base) >> 16; - if (status & R_STP) { /* Remember start of frame */ - lp->rx_old = entry; - } - if (status & R_ENP) { /* Valid frame status */ - if (status & R_ERR) { /* There was an error. */ - dev->stats.rx_errors++; /* Update the error stats. 
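The receive loop in depca_rx() below keeps consuming descriptors until it meets one whose R_OWN bit is still set, i.e. one the LANCE has not filled yet, and hands ownership of each processed descriptor back before moving on. A stand-alone model of that handshake; the bit value and the ring contents are illustrative placeholders, not the real TMD/RMD encodings:

#include <stdio.h>

#define R_OWN  0x80000000u   /* "device owns this descriptor", placeholder */
#define RING   8

int main(void)
{
	unsigned int base[RING];
	int rx_new = 0;

	/* Pretend the device has filled descriptors 0-2 and still owns 3-7. */
	for (int i = 0; i < RING; i++)
		base[i] = (i < 3) ? 0x00001234u : (R_OWN | 0x00001234u);

	while (!(base[rx_new] & R_OWN)) {
		printf("process packet in descriptor %d\n", rx_new);
		base[rx_new] |= R_OWN;          /* give it back to the device */
		rx_new = (rx_new + 1) & (RING - 1);
	}
	printf("stopped at descriptor %d (still device-owned)\n", rx_new);
	return 0;
}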
*/ - if (status & R_FRAM) - dev->stats.rx_frame_errors++; - if (status & R_OFLO) - dev->stats.rx_over_errors++; - if (status & R_CRC) - dev->stats.rx_crc_errors++; - if (status & R_BUFF) - dev->stats.rx_fifo_errors++; - } else { - short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4; - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, pkt_len + 2); - if (skb != NULL) { - unsigned char *buf; - skb_reserve(skb, 2); /* 16 byte align the IP header */ - buf = skb_put(skb, pkt_len); - if (entry < lp->rx_old) { /* Wrapped buffer */ - len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; - memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); - memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len); - } else { /* Linear buffer */ - memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len); - } - - /* - ** Notify the upper protocol layers that there is another - ** packet to handle - */ - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - - /* - ** Update stats - */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { - if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) { - lp->pktStats.bins[i]++; - i = DEPCA_PKT_STAT_SZ; - } - } - if (is_multicast_ether_addr(buf)) { - if (is_broadcast_ether_addr(buf)) { - lp->pktStats.broadcast++; - } else { - lp->pktStats.multicast++; - } - } else if (ether_addr_equal(buf, - dev->dev_addr)) { - lp->pktStats.unicast++; - } - - lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ - if (lp->pktStats.bins[0] == 0) { /* Reset counters */ - memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats)); - } - } else { - printk("%s: Memory squeeze, deferring packet.\n", dev->name); - dev->stats.rx_dropped++; /* Really, deferred. */ - break; - } - } - /* Change buffer ownership for this last frame, back to the adapter */ - for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) { - writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); - } - writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); - } - - /* - ** Update entry information - */ - lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask; - } - - return 0; -} - -/* -** Buffer sent - check for buffer errors. -** Called with lp->lock held -*/ -static int depca_tx(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - int entry; - s32 status; - u_long ioaddr = dev->base_addr; - - for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { - status = readl(&lp->tx_ring[entry].base) >> 16; - - if (status < 0) { /* Packet not yet sent! */ - break; - } else if (status & T_ERR) { /* An error occurred. */ - status = readl(&lp->tx_ring[entry].misc); - dev->stats.tx_errors++; - if (status & TMD3_RTRY) - dev->stats.tx_aborted_errors++; - if (status & TMD3_LCAR) - dev->stats.tx_carrier_errors++; - if (status & TMD3_LCOL) - dev->stats.tx_window_errors++; - if (status & TMD3_UFLO) - dev->stats.tx_fifo_errors++; - if (status & (TMD3_BUFF | TMD3_UFLO)) { - /* Trigger an immediate send demand. 
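The private statistics updated above bucket received frames into 128-byte bins (DEPCA_PKT_BIN_SZ and DEPCA_PKT_STAT_SZ are defined earlier in the removed file, and bin 0 separately duplicates the total packet count). The early-exit loop amounts to a simple size-to-bin mapping; a quick check of where a few packet lengths land:

#include <stdio.h>

#define PKT_BIN_SZ   128   /* DEPCA_PKT_BIN_SZ  */
#define PKT_STAT_SZ  16    /* DEPCA_PKT_STAT_SZ */

/* Same bin selection as the early-exit loop in depca_rx(). */
static int pkt_bin(int pkt_len)
{
	for (int i = 1; i < PKT_STAT_SZ - 1; i++)
		if (pkt_len < i * PKT_BIN_SZ)
			return i;
	return 0;   /* not reached for legal Ethernet frame sizes */
}

int main(void)
{
	int lens[] = { 60, 128, 300, 1514 };
	for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4d -> bin %d\n", lens[i], pkt_bin(lens[i]));
	return 0;
}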
*/ - outw(CSR0, DEPCA_ADDR); - outw(INEA | TDMD, DEPCA_DATA); - } - } else if (status & (T_MORE | T_ONE)) { - dev->stats.collisions++; - } else { - dev->stats.tx_packets++; - } - - /* Update all the pointers */ - lp->tx_old = (lp->tx_old + 1) & lp->txRingMask; - } - - return 0; -} - -static int depca_close(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - s16 nicsr; - u_long ioaddr = dev->base_addr; - - netif_stop_queue(dev); - - outw(CSR0, DEPCA_ADDR); - - if (depca_debug > 1) { - printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA)); - } - - /* - ** We stop the DEPCA here -- it occasionally polls - ** memory if we don't. - */ - outw(STOP, DEPCA_DATA); - - /* - ** Give back the ROM in case the user wants to go to DOS - */ - if (lp->adapter != DEPCA) { - nicsr = inb(DEPCA_NICSR); - nicsr &= ~SHE; - outb(nicsr, DEPCA_NICSR); - } - - /* - ** Free the associated irq - */ - free_irq(dev->irq, dev); - return 0; -} - -static void LoadCSRs(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - - outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */ - outw((u16) lp->device_ram_start, DEPCA_DATA); - outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */ - outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA); - outw(CSR3, DEPCA_ADDR); /* ALE control */ - outw(ACON, DEPCA_DATA); - - outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */ -} - -static int InitRestartDepca(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - int i, status = 0; - - /* Copy the shadow init_block to shared memory */ - memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init)); - - outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */ - outw(INIT, DEPCA_DATA); /* initialize DEPCA */ - - /* wait for lance to complete initialisation */ - for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++); - - if (i != 100) { - /* clear IDON by writing a "1", enable interrupts and start lance */ - outw(IDON | INEA | STRT, DEPCA_DATA); - if (depca_debug > 2) { - printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA)); - } - } else { - printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA)); - status = -1; - } - - return status; -} - -/* -** Set or clear the multicast filter for this adaptor. -*/ -static void set_multicast_list(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - - if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */ - lp->init_block.mode |= PROM; - } else { - SetMulticastFilter(dev); - lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */ - } - - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. */ - netif_start_queue(dev); /* Unlock the TX ring */ -} - -/* -** Calculate the hash code and update the logical address filter -** from a list of ethernet multicast addresses. -** Big endian crc one liner is mine, all mine, ha ha ha ha! -** LANCE calculates its hash codes big endian. 
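The hash described in the comment above, and implemented in SetMulticastFilter() just below, takes the Ethernet CRC-32 of the multicast address, keeps its six least-significant bits in reverse order, and uses the result as a bit index into the 64-bit logical address filter (the 8-byte mcast_table). A stand-alone version of that computation; ether_crc() is re-created here in user space (big-endian Ethernet CRC, bytes fed LSB first, as LANCE-era drivers used it) purely so the example runs outside the kernel:

#include <stdio.h>

static unsigned int ether_crc(int length, const unsigned char *data)
{
	unsigned int crc = 0xffffffffu;
	while (--length >= 0) {
		unsigned char octet = *data++;
		for (int bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7u : 0);
	}
	return crc;
}

int main(void)
{
	/* Example IPv4 all-hosts multicast MAC; any multicast address works. */
	unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int crc = ether_crc(6, mac), bits = crc;
	unsigned int hash = bits & 1;

	for (int j = 0; j < 5; j++)                /* reverse the 6 low bits */
		hash = (hash << 1) | ((bits >>= 1) & 1);

	printf("crc=0x%08x  hash=%2u  -> mcast_table[%u] |= 0x%02x\n",
	       crc, hash, hash >> 3, 1u << (hash & 7));
	return 0;
}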
-*/ -static void SetMulticastFilter(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - struct netdev_hw_addr *ha; - int i, j, bit, byte; - u16 hashcode; - u32 crc; - - if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */ - for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { - lp->init_block.mcast_table[i] = (char) 0xff; - } - } else { - for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */ - lp->init_block.mcast_table[i] = 0; - } - /* Add multicast addresses */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc(ETH_ALEN, ha->addr); - hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ - for (j = 0; j < 5; j++) { /* ... in reverse order. */ - hashcode = (hashcode << 1) | ((crc >>= 1) & 1); - } - - byte = hashcode >> 3; /* bit[3-5] -> byte in filter */ - bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */ - lp->init_block.mcast_table[byte] |= bit; - } - } -} - -static int __init depca_common_init (u_long ioaddr, struct net_device **devp) -{ - int status = 0; - - if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) { - status = -EBUSY; - goto out; - } - - if (DevicePresent(ioaddr)) { - status = -ENODEV; - goto out_release; - } - - if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) { - status = -ENOMEM; - goto out_release; - } - - return 0; - - out_release: - release_region (ioaddr, DEPCA_TOTAL_SIZE); - out: - return status; -} - -/* -** ISA bus I/O device probe -*/ - -static void __init depca_platform_probe (void) -{ - int i; - struct platform_device *pldev; - - for (i = 0; depca_io_ports[i].iobase; i++) { - depca_io_ports[i].device = NULL; - - /* if an address has been specified on the command - * line, use it (if valid) */ - if (io && io != depca_io_ports[i].iobase) - continue; - - pldev = platform_device_alloc(depca_string, i); - if (!pldev) - continue; - - pldev->dev.platform_data = (void *) depca_io_ports[i].iobase; - depca_io_ports[i].device = pldev; - - if (platform_device_add(pldev)) { - depca_io_ports[i].device = NULL; - pldev->dev.platform_data = NULL; - platform_device_put(pldev); - continue; - } - - if (!pldev->dev.driver) { - /* The driver was not bound to this device, there was - * no hardware at this address. Unregister it, as the - * release function will take care of freeing the - * allocated structure */ - - depca_io_ports[i].device = NULL; - pldev->dev.platform_data = NULL; - platform_device_unregister (pldev); - } - } -} - -static enum depca_type __init depca_shmem_probe (ulong *mem_start) -{ - u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES; - enum depca_type adapter = unknown; - int i; - - for (i = 0; mem_base[i]; i++) { - *mem_start = mem ? mem : mem_base[i]; - adapter = DepcaSignature (adapter_name, *mem_start); - if (adapter != unknown) - break; - } - - return adapter; -} - -static int depca_isa_probe(struct platform_device *device) -{ - struct net_device *dev; - struct depca_private *lp; - u_long ioaddr, mem_start = 0; - enum depca_type adapter = unknown; - int status = 0; - - ioaddr = (u_long) device->dev.platform_data; - - if ((status = depca_common_init (ioaddr, &dev))) - goto out; - - adapter = depca_shmem_probe (&mem_start); - - if (adapter == unknown) { - status = -ENODEV; - goto out_free; - } - - dev->base_addr = ioaddr; - dev->irq = irq; /* Use whatever value the user gave - * us, and 0 if he didn't. 
*/ - lp = netdev_priv(dev); - lp->depca_bus = DEPCA_BUS_ISA; - lp->adapter = adapter; - lp->mem_start = mem_start; - - if ((status = depca_hw_init(dev, &device->dev))) - goto out_free; - - return 0; - - out_free: - free_netdev (dev); - release_region (ioaddr, DEPCA_TOTAL_SIZE); - out: - return status; -} - -/* -** EISA callbacks from sysfs. -*/ - -#ifdef CONFIG_EISA -static int __init depca_eisa_probe (struct device *device) -{ - enum depca_type adapter = unknown; - struct eisa_device *edev; - struct net_device *dev; - struct depca_private *lp; - u_long ioaddr, mem_start; - int status = 0; - - edev = to_eisa_device (device); - ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS; - - if ((status = depca_common_init (ioaddr, &dev))) - goto out; - - /* It would have been nice to get card configuration from the - * card. Unfortunately, this register is write-only (shares - * it's address with the ethernet prom)... As we don't parse - * the EISA configuration structures (yet... :-), just rely on - * the ISA probing to sort it out... */ - - adapter = depca_shmem_probe (&mem_start); - if (adapter == unknown) { - status = -ENODEV; - goto out_free; - } - - dev->base_addr = ioaddr; - dev->irq = irq; - lp = netdev_priv(dev); - lp->depca_bus = DEPCA_BUS_EISA; - lp->adapter = edev->id.driver_data; - lp->mem_start = mem_start; - - if ((status = depca_hw_init(dev, device))) - goto out_free; - - return 0; - - out_free: - free_netdev (dev); - release_region (ioaddr, DEPCA_TOTAL_SIZE); - out: - return status; -} -#endif - -static int depca_device_remove(struct device *device) -{ - struct net_device *dev; - struct depca_private *lp; - int bus; - - dev = dev_get_drvdata(device); - lp = netdev_priv(dev); - - unregister_netdev (dev); - iounmap (lp->sh_mem); - release_mem_region (lp->mem_start, lp->mem_len); - release_region (dev->base_addr, DEPCA_TOTAL_SIZE); - bus = lp->depca_bus; - free_netdev (dev); - - return 0; -} - -/* -** Look for a particular board name in the on-board Remote Diagnostics -** and Boot (readb) ROM. This will also give us a clue to the network RAM -** base address. -*/ -static int __init DepcaSignature(char *name, u_long base_addr) -{ - u_int i, j, k; - void __iomem *ptr; - char tmpstr[16]; - u_long prom_addr = base_addr + 0xc000; - u_long mem_addr = base_addr + 0x8000; /* 32KB */ - - /* Can't reserve the prom region, it is already marked as - * used, at least on x86. Instead, reserve a memory region a - * board would certainly use. If it works, go ahead. If not, - * run like hell... 
*/ - - if (!request_mem_region (mem_addr, 16, depca_string)) - return unknown; - - /* Copy the first 16 bytes of ROM */ - - ptr = ioremap(prom_addr, 16); - if (ptr == NULL) { - printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr); - return unknown; - } - for (i = 0; i < 16; i++) { - tmpstr[i] = readb(ptr + i); - } - iounmap(ptr); - - release_mem_region (mem_addr, 16); - - /* Check if PROM contains a valid string */ - for (i = 0; *depca_signature[i] != '\0'; i++) { - for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) { - if (depca_signature[i][k] == tmpstr[j]) { /* track signature */ - k++; - } else { /* lost signature; begin search again */ - k = 0; - } - } - if (k == strlen(depca_signature[i])) - break; - } - - /* Check if name string is valid, provided there's no PROM */ - if (name && *name && (i == unknown)) { - for (i = 0; *depca_signature[i] != '\0'; i++) { - if (strcmp(name, depca_signature[i]) == 0) - break; - } - } - - return i; -} - -/* -** Look for a special sequence in the Ethernet station address PROM that -** is common across all DEPCA products. Note that the original DEPCA needs -** its ROM address counter to be initialized and enabled. Only enable -** if the first address octet is a 0x08 - this minimises the chances of -** messing around with some other hardware, but it assumes that this DEPCA -** card initialized itself correctly. -** -** Search the Ethernet address ROM for the signature. Since the ROM address -** counter can start at an arbitrary point, the search must include the entire -** probe sequence length plus the (length_of_the_signature - 1). -** Stop the search IMMEDIATELY after the signature is found so that the -** PROM address counter is correctly positioned at the start of the -** ethernet address for later read out. -*/ -static int __init DevicePresent(u_long ioaddr) -{ - union { - struct { - u32 a; - u32 b; - } llsig; - char Sig[sizeof(u32) << 1]; - } - dev; - short sigLength = 0; - s8 data; - s16 nicsr; - int i, j, status = 0; - - data = inb(DEPCA_PROM); /* clear counter on DEPCA */ - data = inb(DEPCA_PROM); /* read data */ - - if (data == 0x08) { /* Enable counter on DEPCA */ - nicsr = inb(DEPCA_NICSR); - nicsr |= AAC; - outb(nicsr, DEPCA_NICSR); - } - - dev.llsig.a = ETH_PROM_SIG; - dev.llsig.b = ETH_PROM_SIG; - sigLength = sizeof(u32) << 1; - - for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) { - data = inb(DEPCA_PROM); - if (dev.Sig[j] == data) { /* track signature */ - j++; - } else { /* lost signature; begin search again */ - if (data == dev.Sig[0]) { /* rare case.... */ - j = 1; - } else { - j = 0; - } - } - } - - if (j != sigLength) { - status = -ENODEV; /* search failed */ - } - - return status; -} - -/* -** The DE100 and DE101 PROM accesses were made non-standard for some bizarre -** reason: access the upper half of the PROM with x=0; access the lower half -** with x=1. -*/ -static int __init get_hw_addr(struct net_device *dev) -{ - u_long ioaddr = dev->base_addr; - struct depca_private *lp = netdev_priv(dev); - int i, k, tmp, status = 0; - u_short j, x, chksum; - - x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 
1 : 0); - - for (i = 0, k = 0, j = 0; j < 3; j++) { - k <<= 1; - if (k > 0xffff) - k -= 0xffff; - - k += (u_char) (tmp = inb(DEPCA_PROM + x)); - dev->dev_addr[i++] = (u_char) tmp; - k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8); - dev->dev_addr[i++] = (u_char) tmp; - - if (k > 0xffff) - k -= 0xffff; - } - if (k == 0xffff) - k = 0; - - chksum = (u_char) inb(DEPCA_PROM + x); - chksum |= (u_short) (inb(DEPCA_PROM + x) << 8); - if (k != chksum) - status = -1; - - return status; -} - -/* -** Load a packet into the shared memory -*/ -static int load_packet(struct net_device *dev, struct sk_buff *skb) -{ - struct depca_private *lp = netdev_priv(dev); - int i, entry, end, len, status = NETDEV_TX_OK; - - entry = lp->tx_new; /* Ring around buffer number. */ - end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask; - if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */ - /* - ** Caution: the write order is important here... don't set up the - ** ownership rights until all the other information is in place. - */ - if (end < entry) { /* wrapped buffer */ - len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ; - memcpy_toio(lp->tx_buff[entry], skb->data, len); - memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len); - } else { /* linear buffer */ - memcpy_toio(lp->tx_buff[entry], skb->data, skb->len); - } - - /* set up the buffer descriptors */ - len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; - for (i = entry; i != end; i = (i+1) & lp->txRingMask) { - /* clean out flags */ - writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base); - writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */ - writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */ - len -= TX_BUFF_SZ; - } - /* clean out flags */ - writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base); - writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */ - writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */ - - /* start of packet */ - writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base); - /* end of packet */ - writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base); - - for (i = end; i != entry; --i) { - /* ownership of packet */ - writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base); - if (i == 0) - i = lp->txRingMask + 1; - } - writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base); - - lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */ - } else { - status = NETDEV_TX_LOCKED; - } - - return status; -} - -static void depca_dbg_open(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - struct depca_init *p = &lp->init_block; - int i; - - if (depca_debug > 1) { - /* Do not copy the shadow init block into shared memory */ - /* Debugging should not affect normal operation! 
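load_packet() above is deliberate about write ordering: buffer contents, lengths and the STP/ENP flags go in first, and the T_OWN bits are handed to the LANCE in reverse descriptor order, first descriptor last, so the chip can never start on a half-built chain. A small stand-alone model of that ordering for a three-descriptor packet that wraps the ring; the bit values are placeholders, not the real TMD bits, and the per-descriptor setup is reduced to the flag writes:

#include <stdio.h>

#define T_OWN  0x80000000u   /* placeholder bit values */
#define T_STP  0x02000000u
#define T_ENP  0x01000000u
#define RING_MASK 7

int main(void)
{
	unsigned int base[RING_MASK + 1] = { 0 };
	int entry = 6, end = 0;            /* packet occupies descriptors 6,7,0 */

	/* 1) describe the chain fully, without giving anything away yet */
	base[entry] |= T_STP;
	base[end]   |= T_ENP;

	/* 2) hand descriptors to the device in reverse order, entry last */
	for (int i = end; i != entry; --i) {
		base[i] |= T_OWN;
		if (i == 0)
			i = RING_MASK + 1;
	}
	base[entry] |= T_OWN;

	for (int i = 0; i <= RING_MASK; i++)
		printf("desc %d: 0x%08x\n", i, base[i]);
	return 0;
}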
*/ - /* The shadow init block will get copied across during InitRestartDepca */ - printk("%s: depca open with irq %d\n", dev->name, dev->irq); - printk("Descriptor head addresses (CPU):\n"); - printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring); - printk("Descriptor addresses (CPU):\nRX: "); - for (i = 0; i < lp->rxRingMask; i++) { - if (i < 3) { - printk("%p ", &lp->rx_ring[i].base); - } - } - printk("...%p\n", &lp->rx_ring[i].base); - printk("TX: "); - for (i = 0; i < lp->txRingMask; i++) { - if (i < 3) { - printk("%p ", &lp->tx_ring[i].base); - } - } - printk("...%p\n", &lp->tx_ring[i].base); - printk("\nDescriptor buffers (Device):\nRX: "); - for (i = 0; i < lp->rxRingMask; i++) { - if (i < 3) { - printk("0x%8.8x ", readl(&lp->rx_ring[i].base)); - } - } - printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base)); - printk("TX: "); - for (i = 0; i < lp->txRingMask; i++) { - if (i < 3) { - printk("0x%8.8x ", readl(&lp->tx_ring[i].base)); - } - } - printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base)); - printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start); - printk(" mode: 0x%4.4x\n", p->mode); - printk(" physical address: %pM\n", p->phys_addr); - printk(" multicast hash table: "); - for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) { - printk("%2.2x:", p->mcast_table[i]); - } - printk("%2.2x\n", p->mcast_table[i]); - printk(" rx_ring at: 0x%8.8x\n", p->rx_ring); - printk(" tx_ring at: 0x%8.8x\n", p->tx_ring); - printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset); - printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen); - printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen); - outw(CSR2, DEPCA_ADDR); - printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA)); - outw(CSR1, DEPCA_ADDR); - printk("%4.4x\n", inw(DEPCA_DATA)); - outw(CSR3, DEPCA_ADDR); - printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA)); - } -} - -/* -** Perform IOCTL call functions here. Some are privileged operations and the -** effective uid is checked in those cases. -** All multicast IOCTLs will not work here and are for testing purposes only. -*/ -static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct depca_private *lp = netdev_priv(dev); - struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru; - int i, status = 0; - u_long ioaddr = dev->base_addr; - union { - u8 addr[(HASH_TABLE_LEN * ETH_ALEN)]; - u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1]; - u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2]; - } tmp; - unsigned long flags; - void *buf; - - switch (ioc->cmd) { - case DEPCA_GET_HWADDR: /* Get the hardware address */ - for (i = 0; i < ETH_ALEN; i++) { - tmp.addr[i] = dev->dev_addr[i]; - } - ioc->len = ETH_ALEN; - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) - return -EFAULT; - break; - - case DEPCA_SET_HWADDR: /* Set the hardware address */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) - return -EFAULT; - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = tmp.addr[i]; - } - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new) - cpu_relax(); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. 
*/ - netif_start_queue(dev); /* Unlock the TX ring */ - break; - - case DEPCA_SET_PROM: /* Set Promiscuous Mode */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new) - cpu_relax(); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - lp->init_block.mode |= PROM; /* Set promiscuous mode */ - - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. */ - netif_start_queue(dev); /* Unlock the TX ring */ - break; - - case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new) - cpu_relax(); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */ - - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. */ - netif_start_queue(dev); /* Unlock the TX ring */ - break; - - case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */ - if(!capable(CAP_NET_ADMIN)) - return -EPERM; - printk("%s: Boo!\n", dev->name); - break; - - case DEPCA_GET_MCA: /* Get the multicast address table */ - ioc->len = (HASH_TABLE_LEN >> 3); - if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len)) - return -EFAULT; - break; - - case DEPCA_SET_MCA: /* Set a multicast address */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (ioc->len >= HASH_TABLE_LEN) - return -EINVAL; - if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len)) - return -EFAULT; - set_multicast_list(dev); - break; - - case DEPCA_CLR_MCA: /* Clear all multicast addresses */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - set_multicast_list(dev); - break; - - case DEPCA_MCA_EN: /* Enable pass all multicast addressing */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - set_multicast_list(dev); - break; - - case DEPCA_GET_STATS: /* Get the driver statistics */ - ioc->len = sizeof(lp->pktStats); - buf = kmalloc(ioc->len, GFP_KERNEL); - if(!buf) - return -ENOMEM; - spin_lock_irqsave(&lp->lock, flags); - memcpy(buf, &lp->pktStats, ioc->len); - spin_unlock_irqrestore(&lp->lock, flags); - if (copy_to_user(ioc->data, buf, ioc->len)) - status = -EFAULT; - kfree(buf); - break; - - case DEPCA_CLR_STATS: /* Zero out the driver statistics */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - spin_lock_irqsave(&lp->lock, flags); - memset(&lp->pktStats, 0, sizeof(lp->pktStats)); - spin_unlock_irqrestore(&lp->lock, flags); - break; - - case DEPCA_GET_REG: /* Get the DEPCA Registers */ - i = 0; - tmp.sval[i++] = inw(DEPCA_NICSR); - outw(CSR0, DEPCA_ADDR); /* status register */ - tmp.sval[i++] = inw(DEPCA_DATA); - memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init)); - ioc->len = i + sizeof(struct depca_init); - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) - return -EFAULT; - break; - - default: - return -EOPNOTSUPP; - } - - return status; -} - -static int __init depca_module_init (void) -{ - int err = 0; - -#ifdef CONFIG_EISA - err = eisa_driver_register(&depca_eisa_driver); - if (err) - goto err_eisa; -#endif - err = platform_driver_register(&depca_isa_driver); - if (err) - goto err_eisa; - - depca_platform_probe(); - return 0; - -err_eisa: -#ifdef CONFIG_EISA - eisa_driver_unregister(&depca_eisa_driver); -#endif - return err; -} - -static void __exit depca_module_exit (void) -{ - 
int i; -#ifdef CONFIG_EISA - eisa_driver_unregister (&depca_eisa_driver); -#endif - platform_driver_unregister (&depca_isa_driver); - - for (i = 0; depca_io_ports[i].iobase; i++) { - if (depca_io_ports[i].device) { - depca_io_ports[i].device->dev.platform_data = NULL; - platform_device_unregister (depca_io_ports[i].device); - depca_io_ports[i].device = NULL; - } - } -} - -module_init (depca_module_init); -module_exit (depca_module_exit); diff --git a/drivers/net/ethernet/amd/depca.h b/drivers/net/ethernet/amd/depca.h deleted file mode 100644 index cdcfe4252c16..000000000000 --- a/drivers/net/ethernet/amd/depca.h +++ /dev/null @@ -1,183 +0,0 @@ -/* - Written 1994 by David C. Davies. - - Copyright 1994 David C. Davies. This software may be used and distributed - according to the terms of the GNU General Public License, incorporated herein by - reference. -*/ - -/* -** I/O addresses. Note that the 2k buffer option is not supported in -** this driver. -*/ -#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */ -#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */ -#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */ -#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */ -#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */ -#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */ -#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */ -#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */ - -/* -** These are LANCE registers addressable through DEPCA_ADDR -*/ -#define CSR0 0 -#define CSR1 1 -#define CSR2 2 -#define CSR3 3 - -/* -** NETWORK INTERFACE CSR (NI_CSR) bit definitions -*/ - -#define TO 0x0100 /* Time Out for remote boot */ -#define SHE 0x0080 /* SHadow memory Enable */ -#define BS 0x0040 /* Bank Select */ -#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */ -#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */ -#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */ -#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */ -#define IM 0x0004 /* Interrupt Mask (1->mask) */ -#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */ -#define LED 0x0001 /* LED control */ - -/* -** Control and Status Register 0 (CSR0) bit definitions -*/ - -#define ERR 0x8000 /* Error summary */ -#define BABL 0x4000 /* Babble transmitter timeout error */ -#define CERR 0x2000 /* Collision Error */ -#define MISS 0x1000 /* Missed packet */ -#define MERR 0x0800 /* Memory Error */ -#define RINT 0x0400 /* Receiver Interrupt */ -#define TINT 0x0200 /* Transmit Interrupt */ -#define IDON 0x0100 /* Initialization Done */ -#define INTR 0x0080 /* Interrupt Flag */ -#define INEA 0x0040 /* Interrupt Enable */ -#define RXON 0x0020 /* Receiver on */ -#define TXON 0x0010 /* Transmitter on */ -#define TDMD 0x0008 /* Transmit Demand */ -#define STOP 0x0004 /* Stop */ -#define STRT 0x0002 /* Start */ -#define INIT 0x0001 /* Initialize */ -#define INTM 0xff00 /* Interrupt Mask */ -#define INTE 0xfff0 /* Interrupt Enable */ - -/* -** CONTROL AND STATUS REGISTER 3 (CSR3) -*/ - -#define BSWP 0x0004 /* Byte SWaP */ -#define ACON 0x0002 /* ALE control */ -#define BCON 0x0001 /* Byte CONtrol */ - -/* -** Initialization Block Mode Register -*/ - -#define PROM 0x8000 /* Promiscuous Mode */ -#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */ -#define INTL 0x0040 /* Internal Loopback */ -#define DRTY 0x0020 /* Disable Retry */ -#define COLL 0x0010 /* Force Collision */ -#define DTCR 0x0008 /* Disable 
Transmit CRC */ -#define LOOP 0x0004 /* Loopback */ -#define DTX 0x0002 /* Disable the Transmitter */ -#define DRX 0x0001 /* Disable the Receiver */ - -/* -** Receive Message Descriptor 1 (RMD1) bit definitions. -*/ - -#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */ -#define R_ERR 0x4000 /* Error Summary */ -#define R_FRAM 0x2000 /* Framing Error */ -#define R_OFLO 0x1000 /* Overflow Error */ -#define R_CRC 0x0800 /* CRC Error */ -#define R_BUFF 0x0400 /* Buffer Error */ -#define R_STP 0x0200 /* Start of Packet */ -#define R_ENP 0x0100 /* End of Packet */ - -/* -** Transmit Message Descriptor 1 (TMD1) bit definitions. -*/ - -#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */ -#define T_ERR 0x4000 /* Error Summary */ -#define T_ADD_FCS 0x2000 /* More the 1 retry needed to Xmit */ -#define T_MORE 0x1000 /* >1 retry to transmit packet */ -#define T_ONE 0x0800 /* 1 try needed to transmit the packet */ -#define T_DEF 0x0400 /* Deferred */ -#define T_STP 0x02000000 /* Start of Packet */ -#define T_ENP 0x01000000 /* End of Packet */ -#define T_FLAGS 0xff000000 /* TX Flags Field */ - -/* -** Transmit Message Descriptor 3 (TMD3) bit definitions. -*/ - -#define TMD3_BUFF 0x8000 /* BUFFer error */ -#define TMD3_UFLO 0x4000 /* UnderFLOw error */ -#define TMD3_RES 0x2000 /* REServed */ -#define TMD3_LCOL 0x1000 /* Late COLlision */ -#define TMD3_LCAR 0x0800 /* Loss of CARrier */ -#define TMD3_RTRY 0x0400 /* ReTRY error */ - -/* -** EISA configuration Register (CNFG) bit definitions -*/ - -#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */ -#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */ -#define IRQ11 0x0040 /* Enable -> 1 */ -#define IRQ10 0x0020 /* Enable -> 1 */ -#define IRQ9 0x0010 /* Enable -> 1 */ -#define IRQ5 0x0008 /* Enable -> 1 */ -#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */ -#define PADR16 0x0002 /* RAM on 64kB boundary */ -#define PADR17 0x0001 /* RAM on 128kB boundary */ - -/* -** Miscellaneous -*/ -#define HASH_TABLE_LEN 64 /* Bits */ -#define HASH_BITS 0x003f /* 6 LS bits */ - -#define MASK_INTERRUPTS 1 -#define UNMASK_INTERRUPTS 0 - -#define EISA_EN 0x0001 /* Enable EISA bus buffers */ -#define EISA_ID iobase+0x0080 /* ID long word for EISA card */ -#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */ - -/* -** Include the IOCTL stuff -*/ -#include <linux/sockios.h> - -struct depca_ioctl { - unsigned short cmd; /* Command to run */ - unsigned short len; /* Length of the data buffer */ - unsigned char __user *data; /* Pointer to the data buffer */ -}; - -/* -** Recognised commands for the driver -*/ -#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */ -#define DEPCA_SET_HWADDR 0x02 /* Get the hardware address */ -#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */ -#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */ -#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" 
to the kernel log file */ -#define DEPCA_GET_MCA 0x06 /* Get a multicast address */ -#define DEPCA_SET_MCA 0x07 /* Set a multicast address */ -#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */ -#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */ -#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */ -#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */ -#define DEPCA_GET_REG 0x0c /* Get the Register contents */ -#define DEPCA_SET_REG 0x0d /* Set the Register contents */ -#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */ - diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index a227ccdcb9b5..797f847edf13 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -494,19 +494,15 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, } memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); - new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), - GFP_ATOMIC); - if (!new_dma_addr_list) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), + GFP_ATOMIC); + if (!new_dma_addr_list) goto free_new_tx_ring; - } - new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), - GFP_ATOMIC); - if (!new_skb_list) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) goto free_new_lists; - } kfree(lp->tx_skbuff); kfree(lp->tx_dma_addr); @@ -564,19 +560,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, } memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); - new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), - GFP_ATOMIC); - if (!new_dma_addr_list) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC); + if (!new_dma_addr_list) goto free_new_rx_ring; - } - new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), - GFP_ATOMIC); - if (!new_skb_list) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) goto free_new_lists; - } /* first copy the current receive buffers */ overlap = min(size, lp->rx_ring_size); @@ -1688,10 +1679,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) memcpy(dev->dev_addr, promaddr, 6); } } - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ - if (!is_valid_ether_addr(dev->perm_addr)) + if (!is_valid_ether_addr(dev->dev_addr)) memset(dev->dev_addr, 0, ETH_ALEN); if (pcnet32_debug & NETIF_MSG_PROBE) { @@ -1934,31 +1924,23 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), GFP_ATOMIC); - if (!lp->tx_dma_addr) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + if (!lp->tx_dma_addr) return -ENOMEM; - } lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), GFP_ATOMIC); - if (!lp->rx_dma_addr) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + if (!lp->rx_dma_addr) return -ENOMEM; - } lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), GFP_ATOMIC); - if (!lp->tx_skbuff) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + if (!lp->tx_skbuff) return -ENOMEM; - } lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct 
sk_buff *), GFP_ATOMIC); - if (!lp->rx_skbuff) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); + if (!lp->rx_skbuff) return -ENOMEM; - } return 0; } diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index c2d696c88e46..6a40290d3727 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1284,8 +1284,8 @@ static void lance_free_hwresources(struct lance_private *lp) /* Ethtool support... */ static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, "sunlance"); - strcpy(info->version, "2.02"); + strlcpy(info->driver, "sunlance", sizeof(info->driver)); + strlcpy(info->version, "2.02", sizeof(info->version)); } static const struct ethtool_ops sparc_lance_ethtool_ops = { diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 0035c01660b6..1f07fc633ab9 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -472,7 +472,6 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); @@ -983,11 +982,9 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + rfd_ring->count); tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); - if (unlikely(!tpd_ring->buffer_info)) { - dev_err(&pdev->dev, "kzalloc failed, size = %d\n", - size); + if (unlikely(!tpd_ring->buffer_info)) goto err_nomem; - } + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { tpd_ring[i].buffer_info = (tpd_ring->buffer_info + count); @@ -2075,7 +2072,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter, if (unlikely(pci_dma_mapping_error(adapter->pdev, buffer_info->dma))) goto err_dma; - + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, ATL1C_PCIMAP_TODEVICE); mapped_len += map_len; @@ -2597,10 +2594,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } if (atl1c_read_mac_addr(&adapter->hw)) { /* got a random MAC address, set NET_ADDR_RANDOM to netdev */ - netdev->addr_assign_type |= NET_ADDR_RANDOM; + netdev->addr_assign_type = NET_ADDR_RANDOM; } memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); if (netif_msg_probe(adapter)) dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index e4466a36d106..92f4734f860d 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -819,8 +819,6 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); if (tx_ring->tx_buffer == NULL) { - netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n", - size); err = -ENOMEM; goto failed; } @@ -2342,7 +2340,6 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac_addr, 
netdev->addr_len); netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); INIT_WORK(&adapter->reset_task, atl1e_reset_task); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 71b3d7daa21d..5b0d9931c720 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3053,7 +3053,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* copy the MAC address out of the EEPROM */ if (atl1_read_mac_addr(&adapter->hw)) { /* mark random mac */ - netdev->addr_assign_type |= NET_ADDR_RANDOM; + netdev->addr_assign_type = NET_ADDR_RANDOM; } memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index aab83a2d4e07..1278b47022e0 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1433,14 +1433,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* copy the MAC address out of the EEPROM */ atl2_read_mac_addr(&adapter->hw); memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); -/* FIXME: do we still need this? */ -#ifdef ETHTOOL_GPERMADDR - memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->perm_addr)) { -#else if (!is_valid_ether_addr(netdev->dev_addr)) { -#endif err = -EIO; goto err_eeprom; } diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index 77ffbc4a5071..f82eb1699464 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -84,7 +84,6 @@ static int atlx_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; atlx_set_mac_addr(&adapter->hw); return 0; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index f55267363f35..3e69b3f88099 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -121,4 +121,22 @@ config BNX2X To compile this driver as a module, choose M here: the module will be called bnx2x. This is recommended. +config BNX2X_SRIOV + bool "Broadcom 578xx and 57712 SR-IOV support" + depends on BNX2X && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support in the 578xx and 57712 products. This + allows for virtual function acceleration in virtual environments. + +config BGMAC + tristate "BCMA bus GBit core support" + depends on BCMA_HOST_SOC && HAS_DMA + ---help--- + This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. + They can be found on BCM47xx SoCs and provide gigabit ethernet. + In case of using this driver on BCM4706 it's also requires to enable + BCMA_DRIVER_GMAC_CMN to make it work. 
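
Editorial aside on the BCM4706 note in the BGMAC help text above: the probe code added later in this patch (bgmac_probe()/bgmac_phy_read()) refuses to bind a 4706 GBit MAC core when the shared "GMAC common" core — the one BCMA_DRIVER_GMAC_CMN provides — is absent, because PHY access on that chip goes through the common core. A minimal, self-contained model of that check is sketched here; the struct names, the core id value and MODEL_ENODEV are simplified stand-ins for illustration only, not the bcma API.

/*
 * Editorial sketch, not part of the patch: models the probe-time dependency
 * the BGMAC help text describes.  In the real driver, PHY access on the
 * 4706 GBit MAC goes through the shared GMAC common core, so probing fails
 * with -ENODEV when that core is missing.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_CORE_4706_MAC_GBIT 1  /* hypothetical id, stands in for BCMA_CORE_4706_MAC_GBIT */
#define MODEL_ENODEV 19

struct model_bus  { bool has_gmac_cmn; };              /* is drv_gmac_cmn.core present? */
struct model_core { int id; struct model_bus *bus; };

static int model_probe(const struct model_core *core)
{
	/* Mirrors the check in bgmac_probe(): 4706 MAC without GMAC_CMN -> fail */
	if (core->id == MODEL_CORE_4706_MAC_GBIT && !core->bus->has_gmac_cmn) {
		fprintf(stderr, "GMAC CMN core not found (required for BCM4706)\n");
		return -MODEL_ENODEV;
	}
	return 0;
}

int main(void)
{
	struct model_bus bus = { .has_gmac_cmn = false };
	struct model_core core = { .id = MODEL_CORE_4706_MAC_GBIT, .bus = &bus };

	return model_probe(&core) ? 1 : 0;  /* exits 1: dependency missing */
}
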
+ endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index b7896051d54e..68efa1a3fb88 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_CNIC) += cnic.o obj-$(CONFIG_BNX2X) += bnx2x/ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_TIGON3) += tg3.o +obj-$(CONFIG_BGMAC) += bgmac.o diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 219f6226fcb1..a7efec293037 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -809,11 +809,10 @@ static int b44_rx(struct b44 *bp, int budget) struct sk_buff *copy_skb; b44_recycle_rx(bp, cons, bp->rx_prod); - copy_skb = netdev_alloc_skb(bp->dev, len + 2); + copy_skb = netdev_alloc_skb_ip_align(bp->dev, len); if (copy_skb == NULL) goto drop_it_no_recycle; - skb_reserve(copy_skb, 2); skb_put(copy_skb, len); /* DMA sync done above, copy just the actual packet */ skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET, @@ -1518,10 +1517,8 @@ static void b44_setup_pseudo_magicp(struct b44 *bp) u8 pwol_mask[B44_PMASK_SIZE]; pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); - if (!pwol_pattern) { - pr_err("Memory not available for WOL\n"); + if (!pwol_pattern) return; - } /* Ipv4 magic packet pattern - pattern 0.*/ memset(pwol_mask, 0, B44_PMASK_SIZE); @@ -2111,8 +2108,6 @@ static int b44_get_invariants(struct b44 *bp) return -EINVAL; } - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); - bp->imask = IMASK_DEF; /* XXX - really required? diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 39387d67b722..7d81e059e811 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -799,7 +799,7 @@ static int bcm_enet_open(struct net_device *dev) snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->mii_bus->id, priv->phy_id); - phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0, + phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -886,10 +886,9 @@ static int bcm_enet_open(struct net_device *dev) priv->tx_desc_alloc_size = size; priv->tx_desc_cpu = p; - priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, + priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->tx_skb) { - dev_err(kdev, "cannot allocate rx skb queue\n"); ret = -ENOMEM; goto out_free_tx_ring; } @@ -900,10 +899,9 @@ static int bcm_enet_open(struct net_device *dev) spin_lock_init(&priv->tx_lock); /* init & fill rx ring with skbs */ - priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, + priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->rx_skb) { - dev_err(kdev, "cannot allocate rx skb queue\n"); ret = -ENOMEM; goto out_free_tx_skb; } @@ -1227,10 +1225,11 @@ static const u32 unused_mib_regs[] = { static void bcm_enet_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, bcm_enet_driver_name, 32); - strncpy(drvinfo->version, bcm_enet_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, "bcm63xx", 32); + strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, bcm_enet_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", 
sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); drvinfo->n_stats = BCM_ENET_STATS_LEN; } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c new file mode 100644 index 000000000000..3fd32880e526 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -0,0 +1,1461 @@ +/* + * Driver for (BCM4706)? GBit MAC core on BCMA bus. + * + * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com> + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include "bgmac.h" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/mii.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <asm/mach-bcm47xx/nvram.h> + +static const struct bcma_device_id bgmac_bcma_tbl[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), + BCMA_CORETABLE_END +}; +MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); + +static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, + u32 value, int timeout) +{ + u32 val; + int i; + + for (i = 0; i < timeout / 10; i++) { + val = bcma_read32(core, reg); + if ((val & mask) == value) + return true; + udelay(10); + } + pr_err("Timeout waiting for reg 0x%X\n", reg); + return false; +} + +/************************************************** + * DMA + **************************************************/ + +static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) +{ + u32 val; + int i; + + if (!ring->mmio_base) + return; + + /* Suspend DMA TX ring first. + * bgmac_wait_value doesn't support waiting for any of few values, so + * implement whole loop here. 
+ */ + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, + BGMAC_DMA_TX_SUSPEND); + for (i = 0; i < 10000 / 10; i++) { + val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); + val &= BGMAC_DMA_TX_STAT; + if (val == BGMAC_DMA_TX_STAT_DISABLED || + val == BGMAC_DMA_TX_STAT_IDLEWAIT || + val == BGMAC_DMA_TX_STAT_STOPPED) { + i = 0; + break; + } + udelay(10); + } + if (i) + bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", + ring->mmio_base, val); + + /* Remove SUSPEND bit */ + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0); + if (!bgmac_wait_value(bgmac->core, + ring->mmio_base + BGMAC_DMA_TX_STATUS, + BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED, + 10000)) { + bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", + ring->mmio_base); + udelay(300); + val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); + if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED) + bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n", + ring->mmio_base); + } +} + +static void bgmac_dma_tx_enable(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + u32 ctl; + + ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL); + ctl |= BGMAC_DMA_TX_ENABLE; + ctl |= BGMAC_DMA_TX_PARITY_DISABLE; + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl); +} + +static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, + struct sk_buff *skb) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct net_device *net_dev = bgmac->net_dev; + struct bgmac_dma_desc *dma_desc; + struct bgmac_slot_info *slot; + u32 ctl0, ctl1; + int free_slots; + + if (skb->len > BGMAC_DESC_CTL1_LEN) { + bgmac_err(bgmac, "Too long skb (%d)\n", skb->len); + goto err_stop_drop; + } + + if (ring->start <= ring->end) + free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS; + else + free_slots = ring->start - ring->end; + if (free_slots == 1) { + bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n"); + netif_stop_queue(net_dev); + return NETDEV_TX_BUSY; + } + + slot = &ring->slots[ring->end]; + slot->skb = skb; + slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, slot->dma_addr)) { + bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", + ring->mmio_base); + goto err_stop_drop; + } + + ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF; + if (ring->end == ring->num_slots - 1) + ctl0 |= BGMAC_DESC_CTL0_EOT; + ctl1 = skb->len & BGMAC_DESC_CTL1_LEN; + + dma_desc = ring->cpu_base; + dma_desc += ring->end; + dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr)); + dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr)); + dma_desc->ctl0 = cpu_to_le32(ctl0); + dma_desc->ctl1 = cpu_to_le32(ctl1); + + wmb(); + + /* Increase ring->end to point empty slot. We tell hardware the first + * slot it should *not* read. + */ + if (++ring->end >= BGMAC_TX_RING_SLOTS) + ring->end = 0; + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, + ring->end * sizeof(struct bgmac_dma_desc)); + + /* Always keep one slot free to allow detecting bugged calls. 
*/ + if (--free_slots == 1) + netif_stop_queue(net_dev); + + return NETDEV_TX_OK; + +err_stop_drop: + netif_stop_queue(net_dev); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/* Free transmitted packets */ +static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + int empty_slot; + bool freed = false; + + /* The last slot that hardware didn't consume yet */ + empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); + empty_slot &= BGMAC_DMA_TX_STATDPTR; + empty_slot /= sizeof(struct bgmac_dma_desc); + + while (ring->start != empty_slot) { + struct bgmac_slot_info *slot = &ring->slots[ring->start]; + + if (slot->skb) { + /* Unmap no longer used buffer */ + dma_unmap_single(dma_dev, slot->dma_addr, + slot->skb->len, DMA_TO_DEVICE); + slot->dma_addr = 0; + + /* Free memory! :) */ + dev_kfree_skb(slot->skb); + slot->skb = NULL; + } else { + bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n", + ring->start, ring->end); + } + + if (++ring->start >= BGMAC_TX_RING_SLOTS) + ring->start = 0; + freed = true; + } + + if (freed && netif_queue_stopped(bgmac->net_dev)) + netif_wake_queue(bgmac->net_dev); +} + +static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) +{ + if (!ring->mmio_base) + return; + + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0); + if (!bgmac_wait_value(bgmac->core, + ring->mmio_base + BGMAC_DMA_RX_STATUS, + BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED, + 10000)) + bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n", + ring->mmio_base); +} + +static void bgmac_dma_rx_enable(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + u32 ctl; + + ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); + ctl &= BGMAC_DMA_RX_ADDREXT_MASK; + ctl |= BGMAC_DMA_RX_ENABLE; + ctl |= BGMAC_DMA_RX_PARITY_DISABLE; + ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; + ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT; + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl); +} + +static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, + struct bgmac_slot_info *slot) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_rx_header *rx; + + /* Alloc skb */ + slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); + if (!slot->skb) { + bgmac_err(bgmac, "Allocation of skb failed!\n"); + return -ENOMEM; + } + + /* Poison - if everything goes fine, hardware will overwrite it */ + rx = (struct bgmac_rx_header *)slot->skb->data; + rx->len = cpu_to_le16(0xdead); + rx->flags = cpu_to_le16(0xbeef); + + /* Map skb for the DMA */ + slot->dma_addr = dma_map_single(dma_dev, slot->skb->data, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, slot->dma_addr)) { + bgmac_err(bgmac, "DMA mapping error\n"); + return -ENOMEM; + } + if (slot->dma_addr & 0xC0000000) + bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); + + return 0; +} + +static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, + int weight) +{ + u32 end_slot; + int handled = 0; + + end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS); + end_slot &= BGMAC_DMA_RX_STATDPTR; + end_slot /= sizeof(struct bgmac_dma_desc); + + ring->end = end_slot; + + while (ring->start != ring->end) { + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_slot_info *slot = &ring->slots[ring->start]; + struct sk_buff *skb = slot->skb; + struct sk_buff *new_skb; + struct 
bgmac_rx_header *rx; + u16 len, flags; + + /* Unmap buffer to make it accessible to the CPU */ + dma_sync_single_for_cpu(dma_dev, slot->dma_addr, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + + /* Get info from the header */ + rx = (struct bgmac_rx_header *)skb->data; + len = le16_to_cpu(rx->len); + flags = le16_to_cpu(rx->flags); + + /* Check for poison and drop or pass the packet */ + if (len == 0xdead && flags == 0xbeef) { + bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", + ring->start); + } else { + new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len); + if (new_skb) { + skb_put(new_skb, len); + skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET, + new_skb->data, + len); + new_skb->protocol = + eth_type_trans(new_skb, bgmac->net_dev); + netif_receive_skb(new_skb); + handled++; + } else { + bgmac->net_dev->stats.rx_dropped++; + bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n"); + } + + /* Poison the old skb */ + rx->len = cpu_to_le16(0xdead); + rx->flags = cpu_to_le16(0xbeef); + } + + /* Make it back accessible to the hardware */ + dma_sync_single_for_device(dma_dev, slot->dma_addr, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + + if (++ring->start >= BGMAC_RX_RING_SLOTS) + ring->start = 0; + + if (handled >= weight) /* Should never be greater */ + break; + } + + return handled; +} + +/* Does ring support unaligned addressing? */ +static bool bgmac_dma_unaligned(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, + enum bgmac_dma_ring_type ring_type) +{ + switch (ring_type) { + case BGMAC_DMA_RING_TX: + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, + 0xff0); + if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO)) + return true; + break; + case BGMAC_DMA_RING_RX: + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, + 0xff0); + if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO)) + return true; + break; + } + return false; +} + +static void bgmac_dma_ring_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_slot_info *slot; + int size; + int i; + + for (i = 0; i < ring->num_slots; i++) { + slot = &ring->slots[i]; + if (slot->skb) { + if (slot->dma_addr) + dma_unmap_single(dma_dev, slot->dma_addr, + slot->skb->len, DMA_TO_DEVICE); + dev_kfree_skb(slot->skb); + } + } + + if (ring->cpu_base) { + /* Free ring of descriptors */ + size = ring->num_slots * sizeof(struct bgmac_dma_desc); + dma_free_coherent(dma_dev, size, ring->cpu_base, + ring->dma_base); + } +} + +static void bgmac_dma_free(struct bgmac *bgmac) +{ + int i; + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) + bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]); + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) + bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]); +} + +static int bgmac_dma_alloc(struct bgmac *bgmac) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_dma_ring *ring; + static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, + BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; + int size; /* ring size: different for Tx and Rx */ + int err; + int i; + + BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); + BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base)); + + if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) { + bgmac_err(bgmac, "Core does not report 64-bit DMA\n"); + return -ENOTSUPP; + } + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { + ring = &bgmac->tx_ring[i]; + ring->num_slots = BGMAC_TX_RING_SLOTS; + ring->mmio_base = ring_base[i]; + if 
(bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX)) + bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n", + ring->mmio_base); + + /* Alloc ring of descriptors */ + size = ring->num_slots * sizeof(struct bgmac_dma_desc); + ring->cpu_base = dma_zalloc_coherent(dma_dev, size, + &ring->dma_base, + GFP_KERNEL); + if (!ring->cpu_base) { + bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n", + ring->mmio_base); + goto err_dma_free; + } + if (ring->dma_base & 0xC0000000) + bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); + + /* No need to alloc TX slots yet */ + } + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + ring = &bgmac->rx_ring[i]; + ring->num_slots = BGMAC_RX_RING_SLOTS; + ring->mmio_base = ring_base[i]; + if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX)) + bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n", + ring->mmio_base); + + /* Alloc ring of descriptors */ + size = ring->num_slots * sizeof(struct bgmac_dma_desc); + ring->cpu_base = dma_zalloc_coherent(dma_dev, size, + &ring->dma_base, + GFP_KERNEL); + if (!ring->cpu_base) { + bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n", + ring->mmio_base); + err = -ENOMEM; + goto err_dma_free; + } + if (ring->dma_base & 0xC0000000) + bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); + + /* Alloc RX slots */ + for (i = 0; i < ring->num_slots; i++) { + err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[i]); + if (err) { + bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n"); + goto err_dma_free; + } + } + } + + return 0; + +err_dma_free: + bgmac_dma_free(bgmac); + return -ENOMEM; +} + +static void bgmac_dma_init(struct bgmac *bgmac) +{ + struct bgmac_dma_ring *ring; + struct bgmac_dma_desc *dma_desc; + u32 ctl0, ctl1; + int i; + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { + ring = &bgmac->tx_ring[i]; + + /* We don't implement unaligned addressing, so enable first */ + bgmac_dma_tx_enable(bgmac, ring); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, + lower_32_bits(ring->dma_base)); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI, + upper_32_bits(ring->dma_base)); + + ring->start = 0; + ring->end = 0; /* Points the slot that should *not* be read */ + } + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + ring = &bgmac->rx_ring[i]; + + /* We don't implement unaligned addressing, so enable first */ + bgmac_dma_rx_enable(bgmac, ring); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, + lower_32_bits(ring->dma_base)); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI, + upper_32_bits(ring->dma_base)); + + for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots; + i++, dma_desc++) { + ctl0 = ctl1 = 0; + + if (i == ring->num_slots - 1) + ctl0 |= BGMAC_DESC_CTL0_EOT; + ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; + /* Is there any BGMAC device that requires extension? 
*/ + /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & + * B43_DMA64_DCTL1_ADDREXT_MASK; + */ + + dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[i].dma_addr)); + dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[i].dma_addr)); + dma_desc->ctl0 = cpu_to_le32(ctl0); + dma_desc->ctl1 = cpu_to_le32(ctl1); + } + + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, + ring->num_slots * sizeof(struct bgmac_dma_desc)); + + ring->start = 0; + ring->end = 0; + } +} + +/************************************************** + * PHY ops + **************************************************/ + +static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT); + BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK); + BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT); + BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE); + BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START); + BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); + BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); + + if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bgmac->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= ~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + tmp = BGMAC_PA_START; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + bcma_write32(core, phy_access_addr, tmp); + + if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { + bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n", + phyaddr, reg); + return 0xffff; + } + + return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK; +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ +static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bgmac->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= ~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + bgmac_warn(bgmac, "Error setting MDIO int\n"); + + tmp = BGMAC_PA_START; + tmp |= BGMAC_PA_WRITE; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + tmp |= value; + bcma_write32(core, phy_access_addr, tmp); + + if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { + bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n", + phyaddr, reg); + return -ETIMEDOUT; + } + + return 0; +} + +/* 
http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */ +static void bgmac_phy_force(struct bgmac *bgmac) +{ + u16 ctl; + u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB | + BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX); + + if (bgmac->phyaddr == BGMAC_PHY_NOREGS) + return; + + if (bgmac->autoneg) + return; + + ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL); + ctl &= mask; + if (bgmac->full_duplex) + ctl |= BGMAC_PHY_CTL_DUPLEX; + if (bgmac->speed == BGMAC_SPEED_100) + ctl |= BGMAC_PHY_CTL_SPEED_100; + else if (bgmac->speed == BGMAC_SPEED_1000) + ctl |= BGMAC_PHY_CTL_SPEED_1000; + bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl); +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */ +static void bgmac_phy_advertise(struct bgmac *bgmac) +{ + u16 adv; + + if (bgmac->phyaddr == BGMAC_PHY_NOREGS) + return; + + if (!bgmac->autoneg) + return; + + /* Adv selected 10/100 speeds */ + adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV); + adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL | + BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL); + if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10) + adv |= BGMAC_PHY_ADV_10HALF; + if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100) + adv |= BGMAC_PHY_ADV_100HALF; + if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10) + adv |= BGMAC_PHY_ADV_10FULL; + if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100) + adv |= BGMAC_PHY_ADV_100FULL; + bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv); + + /* Adv selected 1000 speeds */ + adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2); + adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL); + if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000) + adv |= BGMAC_PHY_ADV2_1000HALF; + if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000) + adv |= BGMAC_PHY_ADV2_1000FULL; + bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv); + + /* Restart */ + bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, + bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) | + BGMAC_PHY_CTL_RESTART); +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ +static void bgmac_phy_init(struct bgmac *bgmac) +{ + struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; + u8 i; + + if (ci->id == BCMA_CHIP_ID_BCM5356) { + for (i = 0; i < 5; i++) { + bgmac_phy_write(bgmac, i, 0x1f, 0x008b); + bgmac_phy_write(bgmac, i, 0x15, 0x0100); + bgmac_phy_write(bgmac, i, 0x1f, 0x000f); + bgmac_phy_write(bgmac, i, 0x12, 0x2aaa); + bgmac_phy_write(bgmac, i, 0x1f, 0x000b); + } + } + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { + bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); + bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); + for (i = 0; i < 5; i++) { + bgmac_phy_write(bgmac, i, 0x1f, 0x000f); + bgmac_phy_write(bgmac, i, 0x16, 0x5284); + bgmac_phy_write(bgmac, i, 0x1f, 0x000b); + bgmac_phy_write(bgmac, i, 0x17, 0x0010); + bgmac_phy_write(bgmac, i, 0x1f, 0x000f); + bgmac_phy_write(bgmac, i, 0x16, 0x5296); + bgmac_phy_write(bgmac, i, 0x17, 0x1073); + bgmac_phy_write(bgmac, i, 0x17, 0x9073); + bgmac_phy_write(bgmac, i, 0x16, 0x52b6); + bgmac_phy_write(bgmac, i, 0x17, 0x9273); + bgmac_phy_write(bgmac, i, 0x1f, 0x000b); + } + } +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ +static void bgmac_phy_reset(struct bgmac *bgmac) +{ 
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS) + return; + + bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, + BGMAC_PHY_CTL_RESET); + udelay(100); + if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) & + BGMAC_PHY_CTL_RESET) + bgmac_err(bgmac, "PHY reset failed\n"); + bgmac_phy_init(bgmac); +} + +/************************************************** + * Chip ops + **************************************************/ + +/* TODO: can we just drop @force? Can we don't reset MAC at all if there is + * nothing to change? Try if after stabilizng driver. + */ +static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set, + bool force) +{ + u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); + u32 new_val = (cmdcfg & mask) | set; + + bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR); + udelay(2); + + if (new_val != cmdcfg || force) + bgmac_write(bgmac, BGMAC_CMDCFG, new_val); + + bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR); + udelay(2); +} + +static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr) +{ + u32 tmp; + + tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; + bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp); + tmp = (addr[4] << 8) | addr[5]; + bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp); +} + +static void bgmac_set_rx_mode(struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + if (net_dev->flags & IFF_PROMISC) + bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true); + else + bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true); +} + +#if 0 /* We don't use that regs yet */ +static void bgmac_chip_stats_update(struct bgmac *bgmac) +{ + int i; + + if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) { + for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++) + bgmac->mib_tx_regs[i] = + bgmac_read(bgmac, + BGMAC_TX_GOOD_OCTETS + (i * 4)); + for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++) + bgmac->mib_rx_regs[i] = + bgmac_read(bgmac, + BGMAC_RX_GOOD_OCTETS + (i * 4)); + } + + /* TODO: what else? how to handle BCM4706? 
Specs are needed */ +} +#endif + +static void bgmac_clear_mib(struct bgmac *bgmac) +{ + int i; + + if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) + return; + + bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR); + for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++) + bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4)); + for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++) + bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4)); +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */ +static void bgmac_speed(struct bgmac *bgmac, int speed) +{ + u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD); + u32 set = 0; + + if (speed & BGMAC_SPEED_10) + set |= BGMAC_CMDCFG_ES_10; + if (speed & BGMAC_SPEED_100) + set |= BGMAC_CMDCFG_ES_100; + if (speed & BGMAC_SPEED_1000) + set |= BGMAC_CMDCFG_ES_1000; + if (!bgmac->full_duplex) + set |= BGMAC_CMDCFG_HD; + bgmac_cmdcfg_maskset(bgmac, mask, set, true); +} + +static void bgmac_miiconfig(struct bgmac *bgmac) +{ + u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> + BGMAC_DS_MM_SHIFT; + if (imode == 0 || imode == 1) { + if (bgmac->autoneg) + bgmac_speed(bgmac, BGMAC_SPEED_100); + else + bgmac_speed(bgmac, bgmac->speed); + } +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ +static void bgmac_chip_reset(struct bgmac *bgmac) +{ + struct bcma_device *core = bgmac->core; + struct bcma_bus *bus = core->bus; + struct bcma_chipinfo *ci = &bus->chipinfo; + u32 flags = 0; + u32 iost; + int i; + + if (bcma_core_is_enabled(core)) { + if (!bgmac->stats_grabbed) { + /* bgmac_chip_stats_update(bgmac); */ + bgmac->stats_grabbed = true; + } + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) + bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]); + + bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false); + udelay(1); + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) + bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]); + + /* TODO: Clear software multicast filter list */ + } + + iost = bcma_aread32(core, BCMA_IOST); + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) || + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) + iost &= ~BGMAC_BCMA_IOST_ATTACHED; + + if (iost & BGMAC_BCMA_IOST_ATTACHED) { + flags = BGMAC_BCMA_IOCTL_SW_CLKEN; + if (!bgmac->has_robosw) + flags |= BGMAC_BCMA_IOCTL_SW_RESET; + } + + bcma_core_enable(core, flags); + + if (core->id.rev > 2) { + bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8); + bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24, + 1000); + } + + if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 || + ci->id == BCMA_CHIP_ID_BCM53572) { + struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; + u8 et_swtype = 0; + u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | + BGMAC_CHIPCTL_1_IF_TYPE_RMII; + char buf[2]; + + if (nvram_getenv("et_swtype", buf, 1) > 0) { + if (kstrtou8(buf, 0, &et_swtype)) + bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", + buf); + et_swtype &= 0x0f; + et_swtype <<= 4; + sw_type = et_swtype; + } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) { + sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; + } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) { + sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | + BGMAC_CHIPCTL_1_SW_TYPE_RGMII; + } + bcma_chipco_chipctl_maskset(cc, 1, + ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | + BGMAC_CHIPCTL_1_SW_TYPE_MASK), + sw_type); + } + + if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) + bcma_awrite32(core, BCMA_IOCTL, + bcma_aread32(core, 
BCMA_IOCTL) & + ~BGMAC_BCMA_IOCTL_SW_RESET); + + /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset + * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine + * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to + * be keps until taking MAC out of the reset. + */ + bgmac_cmdcfg_maskset(bgmac, + ~(BGMAC_CMDCFG_TE | + BGMAC_CMDCFG_RE | + BGMAC_CMDCFG_RPI | + BGMAC_CMDCFG_TAI | + BGMAC_CMDCFG_HD | + BGMAC_CMDCFG_ML | + BGMAC_CMDCFG_CFE | + BGMAC_CMDCFG_RL | + BGMAC_CMDCFG_RED | + BGMAC_CMDCFG_PE | + BGMAC_CMDCFG_TPI | + BGMAC_CMDCFG_PAD_EN | + BGMAC_CMDCFG_PF), + BGMAC_CMDCFG_PROM | + BGMAC_CMDCFG_NLC | + BGMAC_CMDCFG_CFE | + BGMAC_CMDCFG_SR, + false); + + bgmac_clear_mib(bgmac); + if (core->id.id == BCMA_CORE_4706_MAC_GBIT) + bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0, + BCMA_GMAC_CMN_PC_MTE); + else + bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE); + bgmac_miiconfig(bgmac); + bgmac_phy_init(bgmac); + + bgmac->int_status = 0; +} + +static void bgmac_chip_intrs_on(struct bgmac *bgmac) +{ + bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask); +} + +static void bgmac_chip_intrs_off(struct bgmac *bgmac) +{ + bgmac_write(bgmac, BGMAC_INT_MASK, 0); + bgmac_read(bgmac, BGMAC_INT_MASK); +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */ +static void bgmac_enable(struct bgmac *bgmac) +{ + struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + u32 cmdcfg; + u32 mode; + u32 rxq_ctl; + u32 fl_ctl; + u16 bp_clk; + u8 mdp; + + cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); + bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE), + BGMAC_CMDCFG_SR, true); + udelay(2); + cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE; + bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg); + + mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> + BGMAC_DS_MM_SHIFT; + if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0) + bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); + if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2) + bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0, + BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); + + switch (ci->id) { + case BCMA_CHIP_ID_BCM5357: + case BCMA_CHIP_ID_BCM4749: + case BCMA_CHIP_ID_BCM53572: + case BCMA_CHIP_ID_BCM4716: + case BCMA_CHIP_ID_BCM47162: + fl_ctl = 0x03cb04cb; + if (ci->id == BCMA_CHIP_ID_BCM5357 || + ci->id == BCMA_CHIP_ID_BCM4749 || + ci->id == BCMA_CHIP_ID_BCM53572) + fl_ctl = 0x2300e1; + bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl); + bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff); + break; + } + + rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); + rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; + bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000; + mdp = (bp_clk * 128 / 1000) - 3; + rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT); + bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl); +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */ +static void bgmac_chip_init(struct bgmac *bgmac, bool full_init) +{ + struct bgmac_dma_ring *ring; + int i; + + /* 1 interrupt per received frame */ + bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT); + + /* Enable 802.3x tx flow control (honor received PAUSE frames) */ + bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true); + + bgmac_set_rx_mode(bgmac->net_dev); + + bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr); + + if (bgmac->loopback) + bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false); + else + bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false); + + bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN); + + if 
(!bgmac->autoneg) { + bgmac_speed(bgmac, bgmac->speed); + bgmac_phy_force(bgmac); + } else if (bgmac->speed) { /* if there is anything to adv */ + bgmac_phy_advertise(bgmac); + } + + if (full_init) { + bgmac_dma_init(bgmac); + if (1) /* FIXME: is there any case we don't want IRQs? */ + bgmac_chip_intrs_on(bgmac); + } else { + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + ring = &bgmac->rx_ring[i]; + bgmac_dma_rx_enable(bgmac, ring); + } + } + + bgmac_enable(bgmac); +} + +static irqreturn_t bgmac_interrupt(int irq, void *dev_id) +{ + struct bgmac *bgmac = netdev_priv(dev_id); + + u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS); + int_status &= bgmac->int_mask; + + if (!int_status) + return IRQ_NONE; + + /* Ack */ + bgmac_write(bgmac, BGMAC_INT_STATUS, int_status); + + /* Disable new interrupts until handling existing ones */ + bgmac_chip_intrs_off(bgmac); + + bgmac->int_status = int_status; + + napi_schedule(&bgmac->napi); + + return IRQ_HANDLED; +} + +static int bgmac_poll(struct napi_struct *napi, int weight) +{ + struct bgmac *bgmac = container_of(napi, struct bgmac, napi); + struct bgmac_dma_ring *ring; + int handled = 0; + + if (bgmac->int_status & BGMAC_IS_TX0) { + ring = &bgmac->tx_ring[0]; + bgmac_dma_tx_free(bgmac, ring); + bgmac->int_status &= ~BGMAC_IS_TX0; + } + + if (bgmac->int_status & BGMAC_IS_RX) { + ring = &bgmac->rx_ring[0]; + handled += bgmac_dma_rx_read(bgmac, ring, weight); + bgmac->int_status &= ~BGMAC_IS_RX; + } + + if (bgmac->int_status) { + bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status); + bgmac->int_status = 0; + } + + if (handled < weight) + napi_complete(napi); + + bgmac_chip_intrs_on(bgmac); + + return handled; +} + +/************************************************** + * net_device_ops + **************************************************/ + +static int bgmac_open(struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + int err = 0; + + bgmac_chip_reset(bgmac); + /* Specs say about reclaiming rings here, but we do that in DMA init */ + bgmac_chip_init(bgmac, true); + + err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED, + KBUILD_MODNAME, net_dev); + if (err < 0) { + bgmac_err(bgmac, "IRQ request error: %d!\n", err); + goto err_out; + } + napi_enable(&bgmac->napi); + + netif_carrier_on(net_dev); + +err_out: + return err; +} + +static int bgmac_stop(struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + netif_carrier_off(net_dev); + + napi_disable(&bgmac->napi); + bgmac_chip_intrs_off(bgmac); + free_irq(bgmac->core->irq, net_dev); + + bgmac_chip_reset(bgmac); + + return 0; +} + +static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + struct bgmac_dma_ring *ring; + + /* No QOS support yet */ + ring = &bgmac->tx_ring[0]; + return bgmac_dma_tx_add(bgmac, ring, skb); +} + +static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + int ret; + + ret = eth_prepare_mac_addr_change(net_dev, addr); + if (ret < 0) + return ret; + bgmac_write_mac_address(bgmac, (u8 *)addr); + eth_commit_mac_addr_change(net_dev, addr); + return 0; +} + +static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + struct mii_ioctl_data *data = if_mii(ifr); + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = bgmac->phyaddr; + /* fallthru */ + case SIOCGMIIREG: + if (!netif_running(net_dev)) + return 
-EAGAIN; + data->val_out = bgmac_phy_read(bgmac, data->phy_id, + data->reg_num & 0x1f); + return 0; + case SIOCSMIIREG: + if (!netif_running(net_dev)) + return -EAGAIN; + bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f, + data->val_in); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct net_device_ops bgmac_netdev_ops = { + .ndo_open = bgmac_open, + .ndo_stop = bgmac_stop, + .ndo_start_xmit = bgmac_start_xmit, + .ndo_set_rx_mode = bgmac_set_rx_mode, + .ndo_set_mac_address = bgmac_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = bgmac_ioctl, +}; + +/************************************************** + * ethtool_ops + **************************************************/ + +static int bgmac_get_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg; + + if (bgmac->autoneg) { + WARN_ON(cmd->advertising); + if (bgmac->full_duplex) { + if (bgmac->speed & BGMAC_SPEED_10) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (bgmac->speed & BGMAC_SPEED_100) + cmd->advertising |= ADVERTISED_100baseT_Full; + if (bgmac->speed & BGMAC_SPEED_1000) + cmd->advertising |= ADVERTISED_1000baseT_Full; + } else { + if (bgmac->speed & BGMAC_SPEED_10) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (bgmac->speed & BGMAC_SPEED_100) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (bgmac->speed & BGMAC_SPEED_1000) + cmd->advertising |= ADVERTISED_1000baseT_Half; + } + } else { + switch (bgmac->speed) { + case BGMAC_SPEED_10: + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + case BGMAC_SPEED_100: + ethtool_cmd_speed_set(cmd, SPEED_100); + break; + case BGMAC_SPEED_1000: + ethtool_cmd_speed_set(cmd, SPEED_1000); + break; + } + } + + cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + + cmd->autoneg = bgmac->autoneg; + + return 0; +} + +#if 0 +static int bgmac_set_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + return -1; +} +#endif + +static void bgmac_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info)); +} + +static const struct ethtool_ops bgmac_ethtool_ops = { + .get_settings = bgmac_get_settings, + .get_drvinfo = bgmac_get_drvinfo, +}; + +/************************************************** + * BCMA bus ops + **************************************************/ + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ +static int bgmac_probe(struct bcma_device *core) +{ + struct net_device *net_dev; + struct bgmac *bgmac; + struct ssb_sprom *sprom = &core->bus->sprom; + u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac; + int err; + + /* We don't support 2nd, 3rd, ... 
units, SPROM has to be adjusted */ + if (core->core_unit > 1) { + pr_err("Unsupported core_unit %d\n", core->core_unit); + return -ENOTSUPP; + } + + if (!is_valid_ether_addr(mac)) { + dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac); + eth_random_addr(mac); + dev_warn(&core->dev, "Using random MAC: %pM\n", mac); + } + + /* Allocation and references */ + net_dev = alloc_etherdev(sizeof(*bgmac)); + if (!net_dev) + return -ENOMEM; + net_dev->netdev_ops = &bgmac_netdev_ops; + net_dev->irq = core->irq; + SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops); + bgmac = netdev_priv(net_dev); + bgmac->net_dev = net_dev; + bgmac->core = core; + bcma_set_drvdata(core, bgmac); + + /* Defaults */ + bgmac->autoneg = true; + bgmac->full_duplex = true; + bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000; + memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN); + + /* On BCM4706 we need common core to access PHY */ + if (core->id.id == BCMA_CORE_4706_MAC_GBIT && + !core->bus->drv_gmac_cmn.core) { + bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n"); + err = -ENODEV; + goto err_netdev_free; + } + bgmac->cmn = core->bus->drv_gmac_cmn.core; + + bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr : + sprom->et0phyaddr; + bgmac->phyaddr &= BGMAC_PHY_MASK; + if (bgmac->phyaddr == BGMAC_PHY_MASK) { + bgmac_err(bgmac, "No PHY found\n"); + err = -ENODEV; + goto err_netdev_free; + } + bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr, + bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); + + if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) { + bgmac_err(bgmac, "PCI setup not implemented\n"); + err = -ENOTSUPP; + goto err_netdev_free; + } + + bgmac_chip_reset(bgmac); + + err = bgmac_dma_alloc(bgmac); + if (err) { + bgmac_err(bgmac, "Unable to alloc memory for DMA\n"); + goto err_netdev_free; + } + + bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; + if (nvram_getenv("et0_no_txint", NULL, 0) == 0) + bgmac->int_mask &= ~BGMAC_IS_TX_MASK; + + /* TODO: reset the external phy. 
Specs are needed */ + bgmac_phy_reset(bgmac); + + bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & + BGMAC_BFL_ENETROBO); + if (bgmac->has_robosw) + bgmac_warn(bgmac, "Support for Roboswitch not implemented\n"); + + if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) + bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); + + err = register_netdev(bgmac->net_dev); + if (err) { + bgmac_err(bgmac, "Cannot register net device\n"); + err = -ENOTSUPP; + goto err_dma_free; + } + + netif_carrier_off(net_dev); + + netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); + + return 0; + +err_dma_free: + bgmac_dma_free(bgmac); + +err_netdev_free: + bcma_set_drvdata(core, NULL); + free_netdev(net_dev); + + return err; +} + +static void bgmac_remove(struct bcma_device *core) +{ + struct bgmac *bgmac = bcma_get_drvdata(core); + + netif_napi_del(&bgmac->napi); + unregister_netdev(bgmac->net_dev); + bgmac_dma_free(bgmac); + bcma_set_drvdata(core, NULL); + free_netdev(bgmac->net_dev); +} + +static struct bcma_driver bgmac_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = bgmac_bcma_tbl, + .probe = bgmac_probe, + .remove = bgmac_remove, +}; + +static int __init bgmac_init(void) +{ + int err; + + err = bcma_driver_register(&bgmac_bcma_driver); + if (err) + return err; + pr_info("Broadcom 47xx GBit MAC driver loaded\n"); + + return 0; +} + +static void __exit bgmac_exit(void) +{ + bcma_driver_unregister(&bgmac_bcma_driver); +} + +module_init(bgmac_init) +module_exit(bgmac_exit) + +MODULE_AUTHOR("Rafał Miłecki"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h new file mode 100644 index 000000000000..4ede614c81f8 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -0,0 +1,453 @@ +#ifndef _BGMAC_H +#define _BGMAC_H + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define bgmac_err(bgmac, fmt, ...) \ + dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) +#define bgmac_warn(bgmac, fmt, ...) \ + dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) +#define bgmac_info(bgmac, fmt, ...) \ + dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) +#define bgmac_dbg(bgmac, fmt, ...) 
\ + dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) + +#include <linux/bcma/bcma.h> +#include <linux/netdevice.h> + +#define BGMAC_DEV_CTL 0x000 +#define BGMAC_DC_TSM 0x00000002 +#define BGMAC_DC_CFCO 0x00000004 +#define BGMAC_DC_RLSS 0x00000008 +#define BGMAC_DC_MROR 0x00000010 +#define BGMAC_DC_FCM_MASK 0x00000060 +#define BGMAC_DC_FCM_SHIFT 5 +#define BGMAC_DC_NAE 0x00000080 +#define BGMAC_DC_TF 0x00000100 +#define BGMAC_DC_RDS_MASK 0x00030000 +#define BGMAC_DC_RDS_SHIFT 16 +#define BGMAC_DC_TDS_MASK 0x000c0000 +#define BGMAC_DC_TDS_SHIFT 18 +#define BGMAC_DEV_STATUS 0x004 /* Configuration of the interface */ +#define BGMAC_DS_RBF 0x00000001 +#define BGMAC_DS_RDF 0x00000002 +#define BGMAC_DS_RIF 0x00000004 +#define BGMAC_DS_TBF 0x00000008 +#define BGMAC_DS_TDF 0x00000010 +#define BGMAC_DS_TIF 0x00000020 +#define BGMAC_DS_PO 0x00000040 +#define BGMAC_DS_MM_MASK 0x00000300 /* Mode of the interface */ +#define BGMAC_DS_MM_SHIFT 8 +#define BGMAC_BIST_STATUS 0x00c +#define BGMAC_INT_STATUS 0x020 /* Interrupt status */ +#define BGMAC_IS_MRO 0x00000001 +#define BGMAC_IS_MTO 0x00000002 +#define BGMAC_IS_TFD 0x00000004 +#define BGMAC_IS_LS 0x00000008 +#define BGMAC_IS_MDIO 0x00000010 +#define BGMAC_IS_MR 0x00000020 +#define BGMAC_IS_MT 0x00000040 +#define BGMAC_IS_TO 0x00000080 +#define BGMAC_IS_DESC_ERR 0x00000400 /* Descriptor error */ +#define BGMAC_IS_DATA_ERR 0x00000800 /* Data error */ +#define BGMAC_IS_DESC_PROT_ERR 0x00001000 /* Descriptor protocol error */ +#define BGMAC_IS_RX_DESC_UNDERF 0x00002000 /* Receive descriptor underflow */ +#define BGMAC_IS_RX_F_OVERF 0x00004000 /* Receive FIFO overflow */ +#define BGMAC_IS_TX_F_UNDERF 0x00008000 /* Transmit FIFO underflow */ +#define BGMAC_IS_RX 0x00010000 /* Interrupt for RX queue 0 */ +#define BGMAC_IS_TX0 0x01000000 /* Interrupt for TX queue 0 */ +#define BGMAC_IS_TX1 0x02000000 /* Interrupt for TX queue 1 */ +#define BGMAC_IS_TX2 0x04000000 /* Interrupt for TX queue 2 */ +#define BGMAC_IS_TX3 0x08000000 /* Interrupt for TX queue 3 */ +#define BGMAC_IS_TX_MASK 0x0f000000 +#define BGMAC_IS_INTMASK 0x0f01fcff +#define BGMAC_IS_ERRMASK 0x0000fc00 +#define BGMAC_INT_MASK 0x024 /* Interrupt mask */ +#define BGMAC_GP_TIMER 0x028 +#define BGMAC_INT_RECV_LAZY 0x100 +#define BGMAC_IRL_TO_MASK 0x00ffffff +#define BGMAC_IRL_FC_MASK 0xff000000 +#define BGMAC_IRL_FC_SHIFT 24 /* Shift the number of interrupts triggered per received frame */ +#define BGMAC_FLOW_CTL_THRESH 0x104 /* Flow control thresholds */ +#define BGMAC_WRRTHRESH 0x108 +#define BGMAC_GMAC_IDLE_CNT_THRESH 0x10c +#define BGMAC_PHY_ACCESS 0x180 /* PHY access address */ +#define BGMAC_PA_DATA_MASK 0x0000ffff +#define BGMAC_PA_ADDR_MASK 0x001f0000 +#define BGMAC_PA_ADDR_SHIFT 16 +#define BGMAC_PA_REG_MASK 0x1f000000 +#define BGMAC_PA_REG_SHIFT 24 +#define BGMAC_PA_WRITE 0x20000000 +#define BGMAC_PA_START 0x40000000 +#define BGMAC_PHY_CNTL 0x188 /* PHY control address */ +#define BGMAC_PC_EPA_MASK 0x0000001f +#define BGMAC_PC_MCT_MASK 0x007f0000 +#define BGMAC_PC_MCT_SHIFT 16 +#define BGMAC_PC_MTE 0x00800000 +#define BGMAC_TXQ_CTL 0x18c +#define BGMAC_TXQ_CTL_DBT_MASK 0x00000fff +#define BGMAC_TXQ_CTL_DBT_SHIFT 0 +#define BGMAC_RXQ_CTL 0x190 +#define BGMAC_RXQ_CTL_DBT_MASK 0x00000fff +#define BGMAC_RXQ_CTL_DBT_SHIFT 0 +#define BGMAC_RXQ_CTL_PTE 0x00001000 +#define BGMAC_RXQ_CTL_MDP_MASK 0x3f000000 +#define BGMAC_RXQ_CTL_MDP_SHIFT 24 +#define BGMAC_GPIO_SELECT 0x194 +#define BGMAC_GPIO_OUTPUT_EN 0x198 +/* For 0x1e0 see BCMA_CLKCTLST */ +#define BGMAC_HW_WAR 0x1e4 +#define BGMAC_PWR_CTL 
0x1e8 +#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */ +#define BGMAC_DMA_BASE1 0x240 /* Tx controller only */ +#define BGMAC_DMA_BASE2 0x280 /* Tx controller only */ +#define BGMAC_DMA_BASE3 0x2C0 /* Tx controller only */ +#define BGMAC_TX_GOOD_OCTETS 0x300 +#define BGMAC_TX_GOOD_OCTETS_HIGH 0x304 +#define BGMAC_TX_GOOD_PKTS 0x308 +#define BGMAC_TX_OCTETS 0x30c +#define BGMAC_TX_OCTETS_HIGH 0x310 +#define BGMAC_TX_PKTS 0x314 +#define BGMAC_TX_BROADCAST_PKTS 0x318 +#define BGMAC_TX_MULTICAST_PKTS 0x31c +#define BGMAC_TX_LEN_64 0x320 +#define BGMAC_TX_LEN_65_TO_127 0x324 +#define BGMAC_TX_LEN_128_TO_255 0x328 +#define BGMAC_TX_LEN_256_TO_511 0x32c +#define BGMAC_TX_LEN_512_TO_1023 0x330 +#define BGMAC_TX_LEN_1024_TO_1522 0x334 +#define BGMAC_TX_LEN_1523_TO_2047 0x338 +#define BGMAC_TX_LEN_2048_TO_4095 0x33c +#define BGMAC_TX_LEN_4095_TO_8191 0x340 +#define BGMAC_TX_LEN_8192_TO_MAX 0x344 +#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */ +#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */ +#define BGMAC_TX_FRAGMENT_PKTS 0x350 +#define BGMAC_TX_UNDERRUNS 0x354 /* Error */ +#define BGMAC_TX_TOTAL_COLS 0x358 +#define BGMAC_TX_SINGLE_COLS 0x35c +#define BGMAC_TX_MULTIPLE_COLS 0x360 +#define BGMAC_TX_EXCESSIVE_COLS 0x364 /* Error */ +#define BGMAC_TX_LATE_COLS 0x368 /* Error */ +#define BGMAC_TX_DEFERED 0x36c +#define BGMAC_TX_CARRIER_LOST 0x370 +#define BGMAC_TX_PAUSE_PKTS 0x374 +#define BGMAC_TX_UNI_PKTS 0x378 +#define BGMAC_TX_Q0_PKTS 0x37c +#define BGMAC_TX_Q0_OCTETS 0x380 +#define BGMAC_TX_Q0_OCTETS_HIGH 0x384 +#define BGMAC_TX_Q1_PKTS 0x388 +#define BGMAC_TX_Q1_OCTETS 0x38c +#define BGMAC_TX_Q1_OCTETS_HIGH 0x390 +#define BGMAC_TX_Q2_PKTS 0x394 +#define BGMAC_TX_Q2_OCTETS 0x398 +#define BGMAC_TX_Q2_OCTETS_HIGH 0x39c +#define BGMAC_TX_Q3_PKTS 0x3a0 +#define BGMAC_TX_Q3_OCTETS 0x3a4 +#define BGMAC_TX_Q3_OCTETS_HIGH 0x3a8 +#define BGMAC_RX_GOOD_OCTETS 0x3b0 +#define BGMAC_RX_GOOD_OCTETS_HIGH 0x3b4 +#define BGMAC_RX_GOOD_PKTS 0x3b8 +#define BGMAC_RX_OCTETS 0x3bc +#define BGMAC_RX_OCTETS_HIGH 0x3c0 +#define BGMAC_RX_PKTS 0x3c4 +#define BGMAC_RX_BROADCAST_PKTS 0x3c8 +#define BGMAC_RX_MULTICAST_PKTS 0x3cc +#define BGMAC_RX_LEN_64 0x3d0 +#define BGMAC_RX_LEN_65_TO_127 0x3d4 +#define BGMAC_RX_LEN_128_TO_255 0x3d8 +#define BGMAC_RX_LEN_256_TO_511 0x3dc +#define BGMAC_RX_LEN_512_TO_1023 0x3e0 +#define BGMAC_RX_LEN_1024_TO_1522 0x3e4 +#define BGMAC_RX_LEN_1523_TO_2047 0x3e8 +#define BGMAC_RX_LEN_2048_TO_4095 0x3ec +#define BGMAC_RX_LEN_4095_TO_8191 0x3f0 +#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4 +#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */ +#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */ +#define BGMAC_RX_FRAGMENT_PKTS 0x400 +#define BGMAC_RX_MISSED_PKTS 0x404 /* Error */ +#define BGMAC_RX_CRC_ALIGN_ERRS 0x408 /* Error */ +#define BGMAC_RX_UNDERSIZE 0x40c /* Error */ +#define BGMAC_RX_CRC_ERRS 0x410 /* Error */ +#define BGMAC_RX_ALIGN_ERRS 0x414 /* Error */ +#define BGMAC_RX_SYMBOL_ERRS 0x418 /* Error */ +#define BGMAC_RX_PAUSE_PKTS 0x41c +#define BGMAC_RX_NONPAUSE_PKTS 0x420 +#define BGMAC_RX_SACHANGES 0x424 +#define BGMAC_RX_UNI_PKTS 0x428 +#define BGMAC_UNIMAC_VERSION 0x800 +#define BGMAC_HDBKP_CTL 0x804 +#define BGMAC_CMDCFG 0x808 /* Configuration */ +#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */ +#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */ +#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */ +#define BGMAC_CMDCFG_ES_10 0x00000000 +#define BGMAC_CMDCFG_ES_100 0x00000004 +#define BGMAC_CMDCFG_ES_1000 0x00000008 +#define BGMAC_CMDCFG_PROM 
0x00000010 /* Set to activate promiscuous mode */ +#define BGMAC_CMDCFG_PAD_EN 0x00000020 +#define BGMAC_CMDCFG_CF 0x00000040 +#define BGMAC_CMDCFG_PF 0x00000080 +#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */ +#define BGMAC_CMDCFG_TAI 0x00000200 +#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ +#define BGMAC_CMDCFG_HD_SHIFT 10 +#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */ +#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ +#define BGMAC_CMDCFG_AE 0x00400000 +#define BGMAC_CMDCFG_CFE 0x00800000 +#define BGMAC_CMDCFG_NLC 0x01000000 +#define BGMAC_CMDCFG_RL 0x02000000 +#define BGMAC_CMDCFG_RED 0x04000000 +#define BGMAC_CMDCFG_PE 0x08000000 +#define BGMAC_CMDCFG_TPI 0x10000000 +#define BGMAC_CMDCFG_AT 0x20000000 +#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */ +#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */ +#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */ +#define BGMAC_PAUSEQUANTA 0x818 +#define BGMAC_MAC_MODE 0x844 +#define BGMAC_OUTERTAG 0x848 +#define BGMAC_INNERTAG 0x84c +#define BGMAC_TXIPG 0x85c +#define BGMAC_PAUSE_CTL 0xb30 +#define BGMAC_TX_FLUSH 0xb34 +#define BGMAC_RX_STATUS 0xb38 +#define BGMAC_TX_STATUS 0xb3c + +#define BGMAC_PHY_CTL 0x00 +#define BGMAC_PHY_CTL_SPEED_MSB 0x0040 +#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */ +#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */ +#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */ +#define BGMAC_PHY_CTL_SPEED 0x2000 +#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */ +#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */ +/* Helpers */ +#define BGMAC_PHY_CTL_SPEED_10 0 +#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED +#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB +#define BGMAC_PHY_ADV 0x04 +#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */ +#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */ +#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */ +#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */ +#define BGMAC_PHY_ADV2 0x09 +#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */ +#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */ + +/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */ +#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */ +#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */ + +/* BCMA GMAC core specific IO status (BCMA_IOST) flags */ +#define BGMAC_BCMA_IOST_ATTACHED 0x00000800 + +#define BGMAC_NUM_MIB_TX_REGS \ + (((BGMAC_TX_Q3_OCTETS_HIGH - BGMAC_TX_GOOD_OCTETS) / 4) + 1) +#define BGMAC_NUM_MIB_RX_REGS \ + (((BGMAC_RX_UNI_PKTS - BGMAC_RX_GOOD_OCTETS) / 4) + 1) + +#define BGMAC_DMA_TX_CTL 0x00 +#define BGMAC_DMA_TX_ENABLE 0x00000001 +#define BGMAC_DMA_TX_SUSPEND 0x00000002 +#define BGMAC_DMA_TX_LOOPBACK 0x00000004 +#define BGMAC_DMA_TX_FLUSH 0x00000010 +#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800 +#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000 +#define BGMAC_DMA_TX_ADDREXT_SHIFT 16 +#define BGMAC_DMA_TX_INDEX 0x04 +#define BGMAC_DMA_TX_RINGLO 0x08 +#define BGMAC_DMA_TX_RINGHI 0x0C +#define BGMAC_DMA_TX_STATUS 0x10 +#define BGMAC_DMA_TX_STATDPTR 0x00001FFF +#define BGMAC_DMA_TX_STAT 0xF0000000 +#define BGMAC_DMA_TX_STAT_DISABLED 0x00000000 +#define BGMAC_DMA_TX_STAT_ACTIVE 0x10000000 +#define BGMAC_DMA_TX_STAT_IDLEWAIT 0x20000000 
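For orientation while reading this register list: in BGMAC_DMA_TX_STATUS the top nibble (BGMAC_DMA_TX_STAT) encodes the DMA engine state and the low bits (BGMAC_DMA_TX_STATDPTR) hold the current descriptor pointer. A minimal sketch of decoding that state follows; it is not part of the patch, the helper name is invented for illustration, and it assumes the per-ring mmio_base offset that the driver's DMA helpers use when touching these registers.

/* Minimal illustrative sketch (not part of the patch): decode the TX DMA
 * engine state using the masks defined above. Assumes ring->mmio_base is
 * the per-ring register base added to the BGMAC_DMA_TX_* offsets.
 */
static const char *bgmac_dma_tx_state_str(struct bgmac *bgmac,
					  struct bgmac_dma_ring *ring)
{
	u32 status = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);

	switch (status & BGMAC_DMA_TX_STAT) {
	case BGMAC_DMA_TX_STAT_DISABLED:
		return "disabled";
	case BGMAC_DMA_TX_STAT_ACTIVE:
		return "active";
	case BGMAC_DMA_TX_STAT_IDLEWAIT:
		return "idle wait";
	default:
		/* remaining encodings (stopped, suspended) follow below */
		return "stopped/suspended";
	}
}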
+#define BGMAC_DMA_TX_STAT_STOPPED 0x30000000 +#define BGMAC_DMA_TX_STAT_SUSP 0x40000000 +#define BGMAC_DMA_TX_ERROR 0x14 +#define BGMAC_DMA_TX_ERRDPTR 0x0001FFFF +#define BGMAC_DMA_TX_ERR 0xF0000000 +#define BGMAC_DMA_TX_ERR_NOERR 0x00000000 +#define BGMAC_DMA_TX_ERR_PROT 0x10000000 +#define BGMAC_DMA_TX_ERR_UNDERRUN 0x20000000 +#define BGMAC_DMA_TX_ERR_TRANSFER 0x30000000 +#define BGMAC_DMA_TX_ERR_DESCREAD 0x40000000 +#define BGMAC_DMA_TX_ERR_CORE 0x50000000 +#define BGMAC_DMA_RX_CTL 0x20 +#define BGMAC_DMA_RX_ENABLE 0x00000001 +#define BGMAC_DMA_RX_FRAME_OFFSET_MASK 0x000000FE +#define BGMAC_DMA_RX_FRAME_OFFSET_SHIFT 1 +#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100 +#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400 +#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800 +#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000 +#define BGMAC_DMA_RX_ADDREXT_SHIFT 16 +#define BGMAC_DMA_RX_INDEX 0x24 +#define BGMAC_DMA_RX_RINGLO 0x28 +#define BGMAC_DMA_RX_RINGHI 0x2C +#define BGMAC_DMA_RX_STATUS 0x30 +#define BGMAC_DMA_RX_STATDPTR 0x00001FFF +#define BGMAC_DMA_RX_STAT 0xF0000000 +#define BGMAC_DMA_RX_STAT_DISABLED 0x00000000 +#define BGMAC_DMA_RX_STAT_ACTIVE 0x10000000 +#define BGMAC_DMA_RX_STAT_IDLEWAIT 0x20000000 +#define BGMAC_DMA_RX_STAT_STOPPED 0x30000000 +#define BGMAC_DMA_RX_STAT_SUSP 0x40000000 +#define BGMAC_DMA_RX_ERROR 0x34 +#define BGMAC_DMA_RX_ERRDPTR 0x0001FFFF +#define BGMAC_DMA_RX_ERR 0xF0000000 +#define BGMAC_DMA_RX_ERR_NOERR 0x00000000 +#define BGMAC_DMA_RX_ERR_PROT 0x10000000 +#define BGMAC_DMA_RX_ERR_UNDERRUN 0x20000000 +#define BGMAC_DMA_RX_ERR_TRANSFER 0x30000000 +#define BGMAC_DMA_RX_ERR_DESCREAD 0x40000000 +#define BGMAC_DMA_RX_ERR_CORE 0x50000000 + +#define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */ +#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */ +#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */ +#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */ +#define BGMAC_DESC_CTL1_LEN 0x00001FFF + +#define BGMAC_PHY_NOREGS 0x1E +#define BGMAC_PHY_MASK 0x1F + +#define BGMAC_MAX_TX_RINGS 4 +#define BGMAC_MAX_RX_RINGS 1 + +#define BGMAC_TX_RING_SLOTS 128 +#define BGMAC_RX_RING_SLOTS 512 - 1 /* Why -1? Well, Broadcom does that... */ + +#define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... 
*/ +#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */ +#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */ +#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE) + +#define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */ +#define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define BGMAC_BFL_ENETVLAN 0x0100 /* can do vlan */ + +#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030 +#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010 +#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020 +#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0 +#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000 +#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII 0x00000040 +#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII 0x00000080 +#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0 +#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000 + +#define BGMAC_SPEED_10 0x0001 +#define BGMAC_SPEED_100 0x0002 +#define BGMAC_SPEED_1000 0x0004 + +#define BGMAC_WEIGHT 64 + +#define ETHER_MAX_LEN 1518 + +struct bgmac_slot_info { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +struct bgmac_dma_desc { + __le32 ctl0; + __le32 ctl1; + __le32 addr_low; + __le32 addr_high; +} __packed; + +enum bgmac_dma_ring_type { + BGMAC_DMA_RING_TX, + BGMAC_DMA_RING_RX, +}; + +/** + * bgmac_dma_ring - contains info about DMA ring (either TX or RX one) + * @start: index of the first slot containing data + * @end: index of a slot that can *not* be read (yet) + * + * Be really aware of the specific @end meaning. It's an index of a slot *after* + * the one containing data that can be read. If @start equals @end the ring is + * empty. + */ +struct bgmac_dma_ring { + u16 num_slots; + u16 start; + u16 end; + + u16 mmio_base; + struct bgmac_dma_desc *cpu_base; + dma_addr_t dma_base; + + struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS]; +}; + +struct bgmac_rx_header { + __le16 len; + __le16 flags; + __le16 pad[12]; +}; + +struct bgmac { + struct bcma_device *core; + struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ + struct net_device *net_dev; + struct napi_struct napi; + + /* DMA */ + struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; + struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS]; + + /* Stats */ + bool stats_grabbed; + u32 mib_tx_regs[BGMAC_NUM_MIB_TX_REGS]; + u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS]; + + /* Int */ + u32 int_mask; + u32 int_status; + + /* Speed-related */ + int speed; + bool autoneg; + bool full_duplex; + + u8 phyaddr; + bool has_robosw; + + bool loopback; +}; + +static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) +{ + return bcma_read32(bgmac->core, offset); +} + +static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + bcma_write32(bgmac->core, offset, value); +} + +static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, + u32 set) +{ + bgmac_write(bgmac, offset, (bgmac_read(bgmac, offset) & mask) | set); +} + +static inline void bgmac_mask(struct bgmac *bgmac, u16 offset, u32 mask) +{ + bgmac_maskset(bgmac, offset, mask, 0); +} + +static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set) +{ + bgmac_maskset(bgmac, offset, ~0, set); +} + +#endif /* _BGMAC_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index a1adfaf87f49..2f0ba8f2fd6c 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8543,7 +8543,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id 
*ent) pci_set_drvdata(pdev, dev); memcpy(dev->dev_addr, bp->mac_addr, 6); - memcpy(dev->perm_addr, bp->mac_addr, 6); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile index 48fbdd48f88f..116762daae09 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/Makefile +++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile @@ -4,4 +4,5 @@ obj-$(CONFIG_BNX2X) += bnx2x.o -bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o +bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o +bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e8d4db10c8f3..e4605a965084 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1,6 +1,6 @@ /* bnx2x.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -13,9 +13,12 @@ #ifndef BNX2X_H #define BNX2X_H + +#include <linux/pci.h> #include <linux/netdevice.h> #include <linux/dma-mapping.h> #include <linux/types.h> +#include <linux/pci_regs.h> /* compilation time flags */ @@ -23,8 +26,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.78.00-0" -#define DRV_MODULE_RELDATE "2012/09/27" +#define DRV_MODULE_VERSION "1.78.02-0" +#define DRV_MODULE_RELDATE "2013/01/14" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) @@ -48,6 +51,13 @@ #include "bnx2x_sp.h" #include "bnx2x_dcb.h" #include "bnx2x_stats.h" +#include "bnx2x_vfpf.h" + +enum bnx2x_int_mode { + BNX2X_INT_MODE_MSIX, + BNX2X_INT_MODE_INTX, + BNX2X_INT_MODE_MSI +}; /* error/debug prints */ @@ -112,29 +122,29 @@ do { \ dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ } while (0) +/* Error handling */ +void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int); #ifdef BNX2X_STOP_ON_ERROR -void bnx2x_int_disable(struct bnx2x *bp); #define bnx2x_panic() \ do { \ bp->panic = 1; \ BNX2X_ERR("driver assert\n"); \ - bnx2x_int_disable(bp); \ - bnx2x_panic_dump(bp); \ + bnx2x_panic_dump(bp, true); \ } while (0) #else #define bnx2x_panic() \ do { \ bp->panic = 1; \ BNX2X_ERR("driver assert\n"); \ - bnx2x_panic_dump(bp); \ + bnx2x_panic_dump(bp, false); \ } while (0) #endif #define bnx2x_mc_addr(ha) ((ha)->addr) #define bnx2x_uc_addr(ha) ((ha)->addr) -#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) -#define U64_HI(x) (u32)(((u64)(x)) >> 32) +#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff)) +#define U64_HI(x) ((u32)(((u64)(x)) >> 32)) #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) @@ -334,6 +344,9 @@ union db_prod { #define SGE_PAGE_SIZE PAGE_SIZE #define SGE_PAGE_SHIFT PAGE_SHIFT #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) +#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE) +#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \ + SGE_PAGES), 0xffff) /* SGE ring related macros */ #define NUM_RX_SGE_PAGES 2 @@ -789,48 +802,63 @@ struct bnx2x_common { #define CHIP_NUM_57711E 0x1650 #define CHIP_NUM_57712 0x1662 #define CHIP_NUM_57712_MF 0x1663 +#define CHIP_NUM_57712_VF 0x166f #define CHIP_NUM_57713 0x1651 #define 
CHIP_NUM_57713E 0x1652 #define CHIP_NUM_57800 0x168a #define CHIP_NUM_57800_MF 0x16a5 +#define CHIP_NUM_57800_VF 0x16a9 #define CHIP_NUM_57810 0x168e #define CHIP_NUM_57810_MF 0x16ae +#define CHIP_NUM_57810_VF 0x16af #define CHIP_NUM_57811 0x163d #define CHIP_NUM_57811_MF 0x163e -#define CHIP_NUM_57840_OBSOLETE 0x168d +#define CHIP_NUM_57811_VF 0x163f +#define CHIP_NUM_57840_OBSOLETE 0x168d #define CHIP_NUM_57840_MF_OBSOLETE 0x16ab #define CHIP_NUM_57840_4_10 0x16a1 #define CHIP_NUM_57840_2_20 0x16a2 #define CHIP_NUM_57840_MF 0x16a4 +#define CHIP_NUM_57840_VF 0x16ad #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) +#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF) #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) +#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF) #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) +#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF) #define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811) #define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF) +#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF) #define CHIP_IS_57840(bp) \ ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) #define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \ (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE)) +#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF) #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ CHIP_IS_57711E(bp)) #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ - CHIP_IS_57712_MF(bp)) + CHIP_IS_57712_MF(bp) || \ + CHIP_IS_57712_VF(bp)) #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ CHIP_IS_57800_MF(bp) || \ + CHIP_IS_57800_VF(bp) || \ CHIP_IS_57810(bp) || \ CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57810_VF(bp) || \ CHIP_IS_57811(bp) || \ CHIP_IS_57811_MF(bp) || \ + CHIP_IS_57811_VF(bp) || \ CHIP_IS_57840(bp) || \ - CHIP_IS_57840_MF(bp)) + CHIP_IS_57840_MF(bp) || \ + CHIP_IS_57840_VF(bp)) #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) @@ -954,6 +982,11 @@ struct bnx2x_port { extern struct workqueue_struct *bnx2x_wq; #define BNX2X_MAX_NUM_OF_VFS 64 +#define BNX2X_VF_CID_WND 0 +#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) +#define BNX2X_CLIENTS_PER_VF 1 +#define BNX2X_FIRST_VF_CID 256 +#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) #define BNX2X_VF_ID_INVALID 0xFF /* @@ -1104,6 +1137,7 @@ struct hw_context { /* forward */ struct bnx2x_ilt; +struct bnx2x_vfdb; enum bnx2x_recovery_state { BNX2X_RECOVERY_DONE, @@ -1165,19 +1199,22 @@ struct bnx2x_fw_stats_req { }; struct bnx2x_fw_stats_data { - struct stats_counter storm_counters; - struct per_port_stats port; - struct per_pf_stats pf; + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; struct fcoe_statistics_params fcoe; - struct per_queue_stats queue_stats[1]; + struct per_queue_stats queue_stats[1]; }; /* Public slow path states */ enum { BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_TX_TIMEOUT, - 
BNX2X_SP_RTNL_AFEX_F_UPDATE, BNX2X_SP_RTNL_FAN_FAILURE, + BNX2X_SP_RTNL_AFEX_F_UPDATE, + BNX2X_SP_RTNL_ENABLE_SRIOV, + BNX2X_SP_RTNL_VFPF_MCAST, + BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, }; @@ -1231,6 +1268,21 @@ struct bnx2x { (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) +#ifdef CONFIG_BNX2X_SRIOV + /* vf pf channel mailbox contains request and response buffers */ + struct bnx2x_vf_mbx_msg *vf2pf_mbox; + dma_addr_t vf2pf_mbox_mapping; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; + + /* bulletin board for messages from pf to vf */ + union pf_vf_bulletin *pf2vf_bulletin; + dma_addr_t pf2vf_bulletin_mapping; + + struct pf_vf_bulletin_content old_bulletin; +#endif /* CONFIG_BNX2X_SRIOV */ + struct net_device *dev; struct pci_dev *pdev; @@ -1295,8 +1347,6 @@ struct bnx2x { __le16 *eq_cons_sb; atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ - - /* Counter for marking that there is a STAT_QUERY ramrod pending */ u16 stats_pending; /* Counter for completed statistics ramrods */ @@ -1318,8 +1368,6 @@ struct bnx2x { #define DISABLE_MSI_FLAG (1 << 7) #define TPA_ENABLE_FLAG (1 << 8) #define NO_MCP_FLAG (1 << 9) - -#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) #define GRO_ENABLE_FLAG (1 << 10) #define MF_FUNC_DIS (1 << 11) #define OWN_CNIC_IRQ (1 << 12) @@ -1330,6 +1378,17 @@ struct bnx2x { #define BC_SUPPORTS_FCOE_FEATURES (1 << 19) #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) +#define IS_VF_FLAG (1 << 22) + +#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) + +#ifdef CONFIG_BNX2X_SRIOV +#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG) +#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG)) +#else +#define IS_VF(bp) false +#define IS_PF(bp) true +#endif #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) @@ -1349,6 +1408,7 @@ struct bnx2x { int mrrs; struct delayed_work sp_task; + atomic_t interrupt_occurred; struct delayed_work sp_rtnl_task; struct delayed_work period_task; @@ -1432,6 +1492,7 @@ struct bnx2x { u8 igu_sb_cnt; u8 min_msix_vec_cnt; + u32 igu_base_addr; dma_addr_t def_status_blk_mapping; struct bnx2x_slowpath *slowpath; @@ -1580,6 +1641,9 @@ struct bnx2x { char fw_ver[32]; const struct firmware *firmware; + struct bnx2x_vfdb *vfdb; +#define IS_SRIOV(bp) ((bp)->vfdb) + /* DCB support on/off */ u16 dcb_state; #define BNX2X_DCB_STATE_OFF 0 @@ -1599,6 +1663,10 @@ struct bnx2x { int dcb_version; /* CAM credit pools */ + + /* used only in sriov */ + struct bnx2x_credit_pool_obj vlans_pool; + struct bnx2x_credit_pool_obj macs_pool; /* RX_MODE object */ @@ -1636,6 +1704,9 @@ struct bnx2x { /* priority to cos mapping */ u8 prio_to_cos[8]; + + int fp_array_size; + u32 dump_preset_idx; }; /* Tx queues may be less or equal to Rx queues */ @@ -1813,12 +1884,16 @@ int bnx2x_del_all_macs(struct bnx2x *bp, /* Init Function API */ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); +void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, + u8 vf_valid, int fw_sb_id, int igu_sb_id); +u32 bnx2x_get_pretend_reg(struct bnx2x *bp); int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); void bnx2x_read_mf_cfg(struct bnx2x *bp); +int 
bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); /* dmae */ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); @@ -1830,6 +1905,18 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, bool with_comp, u8 comp_type); +void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u8 src_type, u8 dst_type); +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); +void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl); + +/* FLR related routines */ +u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); +void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count); +int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt); +u8 bnx2x_is_pcie_pending(struct pci_dev *dev); +int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt); void bnx2x_calc_fc_adv(struct bnx2x *bp); int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, @@ -1854,6 +1941,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, return val; } +void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, + bool is_pf); + #define BNX2X_ILT_ZALLOC(x, y, size) \ do { \ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ @@ -1990,10 +2080,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ BNX2X_PHY_LOOPBACK_FAILED) - #define STROM_ASSERT_ARRAY_SIZE 50 - /* must be used on a CID before placing it on a HW ring */ #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ @@ -2024,7 +2112,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, /* Memory of fairness algorithm . 
2 cycles */ #define FAIR_MEM 2 - #define ATTN_NIG_FOR_FUNC (1L << 8) #define ATTN_SW_TIMER_4_FUNC (1L << 9) #define GPIO_2_FUNC (1L << 10) @@ -2067,6 +2154,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ @@ -2128,7 +2216,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define MULTI_MASK 0x7f - #define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) #define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) #define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) @@ -2156,18 +2243,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, (&bp->def_status_blk->sp_sb.\ index_values[HC_SP_INDEX_ETH_DEF_CONS]) -#define SET_FLAG(value, mask, flag) \ - do {\ - (value) &= ~(mask);\ - (value) |= ((flag) << (mask##_SHIFT));\ - } while (0) - -#define GET_FLAG(value, mask) \ - (((value) & (mask)) >> (mask##_SHIFT)) - -#define GET_FIELD(value, fname) \ - (((value) & (fname##_MASK)) >> (fname##_SHIFT)) - #define CAM_IS_INVALID(x) \ (GET_FLAG(x.flags, \ MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ @@ -2178,7 +2253,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) - #ifndef PXP2_REG_PXP2_INT_STS #define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 #endif @@ -2190,9 +2264,16 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define BNX2X_VPD_LEN 128 #define VENDOR_ID_LEN 4 +#define VF_ACQUIRE_THRESH 3 +#define VF_ACQUIRE_MAC_FILTERS 1 +#define VF_ACQUIRE_MC_FILTERS 10 + +#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \ + (!((me_reg) & ME_REG_VF_ERR))) +int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code); /* Congestion management fairness mode */ -#define CMNG_FNS_NONE 0 -#define CMNG_FNS_MINMAX 1 +#define CMNG_FNS_NONE 0 +#define CMNG_FNS_MINMAX 1 #define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ #define HC_SEG_ACCESS_ATTN 4 @@ -2208,7 +2289,6 @@ static const u32 dmae_reg_go_c[] = { void bnx2x_set_ethtool_ops(struct net_device *netdev); void bnx2x_notify_link_changed(struct bnx2x *bp); - #define BNX2X_MF_SD_PROTOCOL(bp) \ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) @@ -2229,6 +2309,18 @@ void bnx2x_notify_link_changed(struct bnx2x *bp); (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) +#define SET_FLAG(value, mask, flag) \ + do {\ + (value) &= ~(mask);\ + (value) |= ((flag) << (mask##_SHIFT));\ + } while (0) + +#define GET_FLAG(value, mask) \ + (((value) & (mask)) >> (mask##_SHIFT)) + +#define GET_FIELD(value, fname) \ + (((value) & (fname##_MASK)) >> (fname##_SHIFT)) + enum { SWITCH_UPDATE, AFEX_UPDATE, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a5edac8df67b..ecac04a3687c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1,6 +1,6 @@ /* bnx2x_cmn.c: Broadcom Everest network driver. 
* - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,6 +21,7 @@ #include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/ip.h> +#include <net/tcp.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> #include <linux/prefetch.h> @@ -28,8 +29,6 @@ #include "bnx2x_init.h" #include "bnx2x_sp.h" - - /** * bnx2x_move_fp - move content of the fastpath structure. * @@ -87,6 +86,34 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) } /** + * bnx2x_fill_fw_str - Fill buffer with FW version string. + * + * @bp: driver handle + * @buf: character buffer to fill with the fw name + * @buf_len: length of the above buffer + * + */ +void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) +{ + if (IS_PF(bp)) { + u8 phy_fw_ver[PHY_FW_VER_LEN]; + + phy_fw_ver[0] = '\0'; + bnx2x_get_ext_phy_fw_version(&bp->link_params, + phy_fw_ver, PHY_FW_VER_LEN); + strlcpy(buf, bp->fw_ver, buf_len); + snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), + "bc %d.%d.%d%s%s", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff), + ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); + } else { + bnx2x_vf_fill_fw_str(bp, buf, buf_len); + } +} + +/** * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact * * @bp: driver handle @@ -210,7 +237,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) txdata->txq_index, hw_cons, sw_cons, pkt_cons); bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, - &pkts_compl, &bytes_compl); + &pkts_compl, &bytes_compl); sw_cons++; } @@ -316,14 +343,14 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, fp->last_max_sge, fp->rx_sge_prod); } -/* Set Toeplitz hash value in the skb using the value from the +/* Get Toeplitz hash value in the skb using the value from the * CQE (calculated by HW). */ static u32 bnx2x_get_rxhash(const struct bnx2x *bp, const struct eth_fast_path_rx_cqe *cqe, bool *l4_rxhash) { - /* Set Toeplitz hash from CQE */ + /* Get Toeplitz hash from CQE */ if ((bp->dev->features & NETIF_F_RXHASH) && (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { enum eth_rss_hash_type htype; @@ -390,8 +417,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash); if (fp->mode == TPA_MODE_GRO) { u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); - tpa_info->full_page = - SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size; + tpa_info->full_page = SGE_PAGES / gro_size * gro_size; tpa_info->gro_size = gro_size; } @@ -412,31 +438,34 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, */ #define TPA_TSTAMP_OPT_LEN 12 /** - * bnx2x_set_lro_mss - calculate the approximate value of the MSS + * bnx2x_set_gro_params - compute GRO values * - * @bp: driver handle + * @skb: packet skb * @parsing_flags: parsing flags from the START CQE * @len_on_bd: total length of the first packet for the * aggregation. + * @pkt_len: length of all segments * * Approximate value of the MSS for this aggregation calculated using * the first packet of it. + * Compute number of aggregated segments, and gso_type. 
*/ -static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, - u16 len_on_bd) +static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, + u16 len_on_bd, unsigned int pkt_len) { - /* - * TPA arrgregation won't have either IP options or TCP options + /* TPA aggregation won't have either IP options or TCP options * other than timestamp or IPv6 extension headers. */ u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == - PRS_FLAG_OVERETH_IPV6) + PRS_FLAG_OVERETH_IPV6) { hdrs_len += sizeof(struct ipv6hdr); - else /* IPv4 */ + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + } else { hdrs_len += sizeof(struct iphdr); - + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + } /* Check if there was a TCP timestamp, if there is it's will * always be 12 bytes length: nop nop kind length echo val. @@ -446,7 +475,13 @@ static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) hdrs_len += TPA_TSTAMP_OPT_LEN; - return len_on_bd - hdrs_len; + skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len, + skb_shinfo(skb)->gso_size); } static int bnx2x_alloc_rx_sge(struct bnx2x *bp, @@ -463,7 +498,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, } mapping = dma_map_page(&bp->pdev->dev, page, 0, - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + SGE_PAGES, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { __free_pages(page, PAGES_PER_SGE_SHIFT); BNX2X_ERR("Can't map sge\n"); @@ -500,20 +535,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, } /* This is needed in order to enable forwarding support */ - if (frag_size) { - skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, - tpa_info->parsing_flags, len_on_bd); - - skb_shinfo(skb)->gso_type = - (GET_FLAG(tpa_info->parsing_flags, - PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == - PRS_FLAG_OVERETH_IPV6) ? - SKB_GSO_TCPV6 : SKB_GSO_TCPV4; - } - + if (frag_size) + bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, + le16_to_cpu(cqe->pkt_len)); #ifdef BNX2X_STOP_ON_ERROR - if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { + if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", pages, cqe_idx); BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); @@ -531,8 +558,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, if (fp->mode == TPA_MODE_GRO) frag_len = min_t(u32, frag_size, (u32)full_page); else /* LRO */ - frag_len = min_t(u32, frag_size, - (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE)); + frag_len = min_t(u32, frag_size, (u32)SGE_PAGES); rx_pg = &fp->rx_page_ring[sge_idx]; old_rx_pg = *rx_pg; @@ -548,7 +574,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* Unmap the page as we r going to pass it to the stack */ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(&old_rx_pg, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + SGE_PAGES, DMA_FROM_DEVICE); /* Add one frag and update the appropriate fields in the skb */ if (fp->mode == TPA_MODE_LRO) skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); @@ -566,7 +592,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, } skb->data_len += frag_len; - skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE; + skb->truesize += SGE_PAGES; skb->len += frag_len; frag_size -= frag_len; @@ -591,6 +617,54 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp) return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); } +#ifdef CONFIG_INET +static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); +} + +static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); +} +#endif + +static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + if (skb_shinfo(skb)->gso_size) { + skb_set_network_header(skb, 0); + switch (be16_to_cpu(skb->protocol)) { + case ETH_P_IP: + bnx2x_gro_ip_csum(bp, skb); + break; + case ETH_P_IPV6: + bnx2x_gro_ipv6_csum(bp, skb); + break; + default: + BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n", + be16_to_cpu(skb->protocol)); + } + tcp_gro_complete(skb); + } +#endif + napi_gro_receive(&fp->napi, skb); +} static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_agg_info *tpa_info, @@ -645,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, skb, cqe, cqe_idx)) { if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); - napi_gro_receive(&fp->napi, skb); + bnx2x_gro_receive(bp, fp, skb); } else { DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages - dropping packet!\n"); @@ -1087,7 +1161,7 @@ void __bnx2x_link_report(struct bnx2x *bp) struct bnx2x_link_report_data cur_data; /* reread mf_cfg */ - if (!CHIP_IS_E1(bp)) + if (IS_PF(bp) && !CHIP_IS_E1(bp)) bnx2x_read_mf_cfg(bp); /* Read the current link report info */ @@ -1429,10 +1503,14 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) if (nvecs == offset) return; - free_irq(bp->msix_table[offset].vector, bp->dev); - DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", - bp->msix_table[offset].vector); - offset++; + + /* VFs don't have a default SB */ + if (IS_PF(bp)) { + 
free_irq(bp->msix_table[offset].vector, bp->dev); + DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", + bp->msix_table[offset].vector); + offset++; + } if (CNIC_SUPPORT(bp)) { if (nvecs == offset) @@ -1453,21 +1531,30 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) void bnx2x_free_irq(struct bnx2x *bp) { if (bp->flags & USING_MSIX_FLAG && - !(bp->flags & USING_SINGLE_MSIX_FLAG)) - bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + - CNIC_SUPPORT(bp) + 1); - else + !(bp->flags & USING_SINGLE_MSIX_FLAG)) { + int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); + + /* vfs don't have a default status block */ + if (IS_PF(bp)) + nvecs++; + + bnx2x_free_msix_irqs(bp, nvecs); + } else { free_irq(bp->dev->irq, bp->dev); + } } int bnx2x_enable_msix(struct bnx2x *bp) { - int msix_vec = 0, i, rc, req_cnt; + int msix_vec = 0, i, rc; - bp->msix_table[msix_vec].entry = msix_vec; - BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n", - bp->msix_table[0].entry); - msix_vec++; + /* VFs don't have a default status block */ + if (IS_PF(bp)) { + bp->msix_table[msix_vec].entry = msix_vec; + BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n", + bp->msix_table[0].entry); + msix_vec++; + } /* Cnic requires an msix vector for itself */ if (CNIC_SUPPORT(bp)) { @@ -1485,9 +1572,10 @@ int bnx2x_enable_msix(struct bnx2x *bp) msix_vec++; } - req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1; + DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n", + msix_vec); - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec); /* * reconfigure number of tx/rx queues according to available @@ -1495,7 +1583,7 @@ int bnx2x_enable_msix(struct bnx2x *bp) */ if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) { /* how less vectors we will have? */ - int diff = req_cnt - rc; + int diff = msix_vec - rc; BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); @@ -1549,12 +1637,15 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) { int i, rc, offset = 0; - rc = request_irq(bp->msix_table[offset++].vector, - bnx2x_msix_sp_int, 0, - bp->dev->name, bp->dev); - if (rc) { - BNX2X_ERR("request sp irq failed\n"); - return -EBUSY; + /* no default status block for vf */ + if (IS_PF(bp)) { + rc = request_irq(bp->msix_table[offset++].vector, + bnx2x_msix_sp_int, 0, + bp->dev->name, bp->dev); + if (rc) { + BNX2X_ERR("request sp irq failed\n"); + return -EBUSY; + } } if (CNIC_SUPPORT(bp)) @@ -1578,12 +1669,20 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) } i = BNX2X_NUM_ETH_QUEUES(bp); - offset = 1 + CNIC_SUPPORT(bp); - netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", - bp->msix_table[0].vector, - 0, bp->msix_table[offset].vector, - i - 1, bp->msix_table[offset + i - 1].vector); - + if (IS_PF(bp)) { + offset = 1 + CNIC_SUPPORT(bp); + netdev_info(bp->dev, + "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", + bp->msix_table[0].vector, + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + } else { + offset = CNIC_SUPPORT(bp); + netdev_info(bp->dev, + "using MSI-X IRQs: fp[%d] %d ... 
fp[%d] %d\n", + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + } return 0; } @@ -1628,7 +1727,6 @@ static int bnx2x_setup_irqs(struct bnx2x *bp) if (rc) return rc; } else { - bnx2x_ack_int(bp); rc = bnx2x_req_irq(bp); if (rc) { BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); @@ -1726,7 +1824,6 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); } - void bnx2x_set_num_queues(struct bnx2x *bp) { /* RSS queues */ @@ -1991,27 +2088,212 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) } while (0) #endif /*BNX2X_STOP_ON_ERROR*/ -bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) +static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) +{ + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + return; +} + +static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) { - /* build FW version dword */ - u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) + - (BCM_5710_FW_MINOR_VERSION << 8) + - (BCM_5710_FW_REVISION_VERSION << 16) + - (BCM_5710_FW_ENGINEERING_VERSION << 24); + int num_groups, vf_headroom = 0; + int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; + + /* number of queues for statistics is number of eth queues + FCoE */ + u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; + + /* Total number of FW statistics requests = + * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper + * and fcoe l2 queue) stats + num of queues (which includes another 1 + * for fcoe l2 queue if applicable) + */ + bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; + + /* vf stats appear in the request list, but their data is allocated by + * the VFs themselves. We don't include them in the bp->fw_stats_num as + * it is used to determine where to place the vf stats queries in the + * request struct + */ + if (IS_SRIOV(bp)) + vf_headroom = bnx2x_vf_headroom(bp); - /* read loaded FW from chip */ - u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + /* Request is built from stats_query_header and an array of + * stats_query_cmd_group each of which contains + * STATS_QUERY_CMD_COUNT rules. The real number or requests is + * configured in the stats_query_header. + */ + num_groups = + (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + + (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? + 1 : 0)); + + DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n", + bp->fw_stats_num, vf_headroom, num_groups); + bp->fw_stats_req_sz = sizeof(struct stats_query_header) + + num_groups * sizeof(struct stats_query_cmd_group); + + /* Data for statistics requests + stats_counter + * stats_counter holds per-STORM counters that are incremented + * when STORM has finished with the current request. + * memory for FCoE offloaded statistics are counted anyway, + * even if they will not be sent. + * VF stats are not accounted for here as the data of VF stats is stored + * in memory allocated by the VF, not here. 
+ */ + bp->fw_stats_data_sz = sizeof(struct per_port_stats) + + sizeof(struct per_pf_stats) + + sizeof(struct fcoe_statistics_params) + + sizeof(struct per_queue_stats) * num_queue_stats + + sizeof(struct stats_counter); + + BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + + /* Set shortcuts */ + bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; + bp->fw_stats_req_mapping = bp->fw_stats_mapping; + bp->fw_stats_data = (struct bnx2x_fw_stats_data *) + ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); + bp->fw_stats_data_mapping = bp->fw_stats_mapping + + bp->fw_stats_req_sz; + + DP(BNX2X_MSG_SP, "statistics request base address set to %x %x", + U64_HI(bp->fw_stats_req_mapping), + U64_LO(bp->fw_stats_req_mapping)); + DP(BNX2X_MSG_SP, "statistics data base address set to %x %x", + U64_HI(bp->fw_stats_data_mapping), + U64_LO(bp->fw_stats_data_mapping)); + return 0; - DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw); +alloc_mem_err: + bnx2x_free_fw_stats_mem(bp); + BNX2X_ERR("Can't allocate FW stats memory\n"); + return -ENOMEM; +} + +/* send load request to mcp and analyze response */ +static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) +{ + /* init fw_seq */ + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + + /* Get current FW pulse sequence */ + bp->fw_drv_pulse_wr_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); + BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + + /* load request */ + (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + + /* if mcp fails to respond we must abort */ + if (!(*load_code)) { + BNX2X_ERR("MCP response failure, aborting\n"); + return -EBUSY; + } + + /* If mcp refused (e.g. other port is in diagnostic mode) we + * must abort + */ + if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { + BNX2X_ERR("MCP refused load request, aborting\n"); + return -EBUSY; + } + return 0; +} - if (loaded_fw != my_fw) { - if (is_err) - BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n", +/* check whether another PF has already loaded FW to chip. In + * virtualized environments a pf from another VM may have already + * initialized the device including loading FW + */ +int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code) +{ + /* is another pf loaded on this engine? */ + if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && + load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { + /* build my FW version dword */ + u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) + + (BCM_5710_FW_MINOR_VERSION << 8) + + (BCM_5710_FW_REVISION_VERSION << 16) + + (BCM_5710_FW_ENGINEERING_VERSION << 24); + + /* read loaded FW from chip */ + u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + + DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", + loaded_fw, my_fw); + + /* abort nic load if version mismatch */ + if (my_fw != loaded_fw) { + BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. 
aborting\n", loaded_fw, my_fw); - return false; + return -EBUSY; + } + } + return 0; +} + +/* returns the "mcp load_code" according to global load_count array */ +static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) +{ + int path = BP_PATH(bp); + + DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + load_count[path][0]++; + load_count[path][1 + port]++; + DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 1) + return FW_MSG_CODE_DRV_LOAD_COMMON; + else if (load_count[path][1 + port] == 1) + return FW_MSG_CODE_DRV_LOAD_PORT; + else + return FW_MSG_CODE_DRV_LOAD_FUNCTION; +} + +/* mark PMF if applicable */ +static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) +{ + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { + bp->port.pmf = 1; + /* We need the barrier to ensure the ordering between the + * writing to bp->port.pmf here and reading it from the + * bnx2x_periodic_task(). + */ + smp_mb(); + } else { + bp->port.pmf = 0; + } + + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); +} + +static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) +{ + if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && + (bp->common.shmem2_base)) { + if (SHMEM2_HAS(bp, dcc_support)) + SHMEM2_WR(bp, dcc_support, + (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | + SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + if (SHMEM2_HAS(bp, afex_driver_support)) + SHMEM2_WR(bp, afex_driver_support, + SHMEM_AFEX_SUPPORTED_VERSION_ONE); } - return true; + /* Set AFEX default VLAN tag to an invalid value */ + bp->afex_def_vlan_tag = -1; } /** @@ -2026,49 +2308,15 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) static void bnx2x_bz_fp(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; - struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index]; int cos; struct napi_struct orig_napi = fp->napi; struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; /* bzero bnx2x_fastpath contents */ - if (bp->stats_init) { - memset(fp->tpa_info, 0, sizeof(*fp->tpa_info)); - memset(fp, 0, sizeof(*fp)); - } else { - /* Keep Queue statistics */ - struct bnx2x_eth_q_stats *tmp_eth_q_stats; - struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; - - tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), - GFP_KERNEL); - if (tmp_eth_q_stats) - memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats, - sizeof(struct bnx2x_eth_q_stats)); - - tmp_eth_q_stats_old = - kzalloc(sizeof(struct bnx2x_eth_q_stats_old), - GFP_KERNEL); - if (tmp_eth_q_stats_old) - memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old, - sizeof(struct bnx2x_eth_q_stats_old)); - - memset(fp->tpa_info, 0, sizeof(*fp->tpa_info)); - memset(fp, 0, sizeof(*fp)); - - if (tmp_eth_q_stats) { - memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats, - sizeof(struct bnx2x_eth_q_stats)); - kfree(tmp_eth_q_stats); - } - - if (tmp_eth_q_stats_old) { - memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old, - sizeof(struct bnx2x_eth_q_stats_old)); - kfree(tmp_eth_q_stats_old); - } - - } + if (fp->tpa_info) + memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * + sizeof(struct bnx2x_agg_info)); + memset(fp, 0, sizeof(*fp)); /* Restore the NAPI object as it has been already initialized */ fp->napi = 
orig_napi; @@ -2114,10 +2362,12 @@ int bnx2x_load_cnic(struct bnx2x *bp) mutex_init(&bp->cnic_mutex); - rc = bnx2x_alloc_mem_cnic(bp); - if (rc) { - BNX2X_ERR("Unable to allocate bp memory for cnic\n"); - LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + if (IS_PF(bp)) { + rc = bnx2x_alloc_mem_cnic(bp); + if (rc) { + BNX2X_ERR("Unable to allocate bp memory for cnic\n"); + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + } } rc = bnx2x_alloc_fp_mem_cnic(bp); @@ -2144,14 +2394,17 @@ int bnx2x_load_cnic(struct bnx2x *bp) bnx2x_nic_init_cnic(bp); - /* Enable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); + if (IS_PF(bp)) { + /* Enable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); - for_each_cnic_queue(bp, i) { - rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); - if (rc) { - BNX2X_ERR("Queue setup failed\n"); - LOAD_ERROR_EXIT(bp, load_error_cnic2); + /* setup cnic queues */ + for_each_cnic_queue(bp, i) { + rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); + if (rc) { + BNX2X_ERR("Queue setup failed\n"); + LOAD_ERROR_EXIT(bp, load_error_cnic2); + } } } @@ -2192,13 +2445,11 @@ load_error_cnic0: #endif /* ! BNX2X_STOP_ON_ERROR */ } - /* must be called with rtnl_lock */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) { int port = BP_PORT(bp); - u32 load_code; - int i, rc; + int i, rc = 0, load_code = 0; DP(NETIF_MSG_IFUP, "Starting NIC load\n"); DP(NETIF_MSG_IFUP, @@ -2213,15 +2464,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; - /* Set the initial link reported state to link down */ - bnx2x_acquire_phy_lock(bp); memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, &bp->last_reported_link.link_report_flags); - bnx2x_release_phy_lock(bp); - /* must be called before memory allocation and HW init */ - bnx2x_ilt_set_info(bp); + if (IS_PF(bp)) + /* must be called before memory allocation and HW init */ + bnx2x_ilt_set_info(bp); /* * Zero fastpath structures preserving invariants like napi, which are @@ -2240,8 +2489,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Set the receive queues buffer size */ bnx2x_set_rx_buf_size(bp); - if (bnx2x_alloc_mem(bp)) - return -ENOMEM; + if (IS_PF(bp)) { + rc = bnx2x_alloc_mem(bp); + if (rc) { + BNX2X_ERR("Unable to allocate bp memory\n"); + return rc; + } + } + + /* Allocated memory for FW statistics */ + if (bnx2x_alloc_fw_stats_mem(bp)) + LOAD_ERROR_EXIT(bp, load_error0); + + /* need to be done after alloc mem, since it's self adjusting to amount + * of memory available for RSS queues + */ + rc = bnx2x_alloc_fp_mem(bp); + if (rc) { + BNX2X_ERR("Unable to allocate memory for fps\n"); + LOAD_ERROR_EXIT(bp, load_error0); + } + + /* request pf to initialize status blocks */ + if (IS_VF(bp)) { + rc = bnx2x_vfpf_init(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error0); + } /* As long as bnx2x_alloc_mem() may possibly update * bp->num_queues, bnx2x_set_real_num_queues() should always @@ -2264,98 +2538,48 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) DP(NETIF_MSG_IFUP, "napi added\n"); bnx2x_napi_enable(bp); - /* set pf load just before approaching the MCP */ - bnx2x_set_pf_load(bp); - - /* Send LOAD_REQUEST command to MCP - * Returns the type of LOAD command: - * if it is the first port to be initialized - * common blocks should be initialized, otherwise - not - */ - if (!BP_NOMCP(bp)) { - /* init fw_seq */ - bp->fw_seq = - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - 
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); - - /* Get current FW pulse sequence */ - bp->fw_drv_pulse_wr_seq = - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & - DRV_PULSE_SEQ_MASK); - BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); - - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, - DRV_MSG_CODE_LOAD_REQ_WITH_LFA); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; - LOAD_ERROR_EXIT(bp, load_error1); - } - if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { - BNX2X_ERR("Driver load refused\n"); - rc = -EBUSY; /* other port in diagnostic mode */ - LOAD_ERROR_EXIT(bp, load_error1); - } - if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && - load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { - /* abort nic load if version mismatch */ - if (!bnx2x_test_firmware_version(bp, true)) { - rc = -EBUSY; + if (IS_PF(bp)) { + /* set pf load just before approaching the MCP */ + bnx2x_set_pf_load(bp); + + /* if mcp exists send load request and analyze response */ + if (!BP_NOMCP(bp)) { + /* attempt to load pf */ + rc = bnx2x_nic_load_request(bp, &load_code); + if (rc) + LOAD_ERROR_EXIT(bp, load_error1); + + /* what did mcp say? */ + rc = bnx2x_nic_load_analyze_req(bp, load_code); + if (rc) { + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); LOAD_ERROR_EXIT(bp, load_error2); } + } else { + load_code = bnx2x_nic_load_no_mcp(bp, port); } - } else { - int path = BP_PATH(bp); - - DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - load_count[path][0]++; - load_count[path][1 + port]++; - DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - if (load_count[path][0] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_COMMON; - else if (load_count[path][1 + port] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_PORT; - else - load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; - } - - if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || - (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || - (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { - bp->port.pmf = 1; - /* - * We need the barrier to ensure the ordering between the - * writing to bp->port.pmf here and reading it from the - * bnx2x_periodic_task(). 
- */ - smp_mb(); - } else - bp->port.pmf = 0; - - DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf); + /* mark pmf if applicable */ + bnx2x_nic_load_pmf(bp, load_code); - /* Init Function state controlling object */ - bnx2x__init_func_obj(bp); + /* Init Function state controlling object */ + bnx2x__init_func_obj(bp); - /* Initialize HW */ - rc = bnx2x_init_hw(bp, load_code); - if (rc) { - BNX2X_ERR("HW init failed, aborting\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - LOAD_ERROR_EXIT(bp, load_error2); + /* Initialize HW */ + rc = bnx2x_init_hw(bp, load_code); + if (rc) { + BNX2X_ERR("HW init failed, aborting\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } } /* Connect to IRQs */ rc = bnx2x_setup_irqs(bp); if (rc) { - BNX2X_ERR("IRQs setup failed\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + BNX2X_ERR("setup irqs failed\n"); + if (IS_PF(bp)) + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); LOAD_ERROR_EXIT(bp, load_error2); } @@ -2363,78 +2587,89 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_nic_init(bp, load_code); /* Init per-function objects */ - bnx2x_init_bp_objs(bp); - - if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || - (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && - (bp->common.shmem2_base)) { - if (SHMEM2_HAS(bp, dcc_support)) - SHMEM2_WR(bp, dcc_support, - (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | - SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); - if (SHMEM2_HAS(bp, afex_driver_support)) - SHMEM2_WR(bp, afex_driver_support, - SHMEM_AFEX_SUPPORTED_VERSION_ONE); - } + if (IS_PF(bp)) { + bnx2x_init_bp_objs(bp); + bnx2x_iov_nic_init(bp); + + /* Set AFEX default VLAN tag to an invalid value */ + bp->afex_def_vlan_tag = -1; + bnx2x_nic_load_afex_dcc(bp, load_code); + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; + rc = bnx2x_func_start(bp); + if (rc) { + BNX2X_ERR("Function start failed!\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - /* Set AFEX default VLAN tag to an invalid value */ - bp->afex_def_vlan_tag = -1; + LOAD_ERROR_EXIT(bp, load_error3); + } - bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; - rc = bnx2x_func_start(bp); - if (rc) { - BNX2X_ERR("Function start failed!\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - LOAD_ERROR_EXIT(bp, load_error3); - } + /* Send LOAD_DONE command to MCP */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, + DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + LOAD_ERROR_EXIT(bp, load_error3); + } + } - /* Send LOAD_DONE command to MCP */ - if (!BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; + /* setup the leading queue */ + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); LOAD_ERROR_EXIT(bp, load_error3); } - } - rc = bnx2x_setup_leading(bp); - if (rc) { - BNX2X_ERR("Setup leading failed!\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } + /* set up the rest of the queues */ + for_each_nondefault_eth_queue(bp, i) { + rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); + if (rc) { + BNX2X_ERR("Queue setup failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } + } - for_each_nondefault_eth_queue(bp, i) { - rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); + /* setup rss */ + rc = bnx2x_init_rss_pf(bp); if (rc) { - BNX2X_ERR("Queue setup failed\n"); + BNX2X_ERR("PF RSS init failed\n"); LOAD_ERROR_EXIT(bp, load_error3); } - } - rc = 
bnx2x_init_rss_pf(bp); - if (rc) { - BNX2X_ERR("PF RSS init failed\n"); - LOAD_ERROR_EXIT(bp, load_error3); + } else { /* vf */ + for_each_eth_queue(bp, i) { + rc = bnx2x_vfpf_setup_q(bp, i); + if (rc) { + BNX2X_ERR("Queue setup failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } + } } /* Now when Clients are configured we are ready to work */ bp->state = BNX2X_STATE_OPEN; /* Configure a ucast MAC */ - rc = bnx2x_set_eth_mac(bp, true); + if (IS_PF(bp)) + rc = bnx2x_set_eth_mac(bp, true); + else /* vf */ + rc = bnx2x_vfpf_set_mac(bp); if (rc) { BNX2X_ERR("Setting Ethernet MAC failed\n"); LOAD_ERROR_EXIT(bp, load_error3); } - if (bp->pending_max) { + if (IS_PF(bp) && bp->pending_max) { bnx2x_update_max_mf_config(bp, bp->pending_max); bp->pending_max = 0; } - if (bp->port.pmf) - bnx2x_initial_phy_init(bp, load_mode); + if (bp->port.pmf) { + rc = bnx2x_initial_phy_init(bp, load_mode); + if (rc) + LOAD_ERROR_EXIT(bp, load_error3); + } bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; /* Start fast path */ @@ -2476,8 +2711,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (CNIC_ENABLED(bp)) bnx2x_load_cnic(bp); - /* mark driver is loaded in shmem2 */ - if (SHMEM2_HAS(bp, drv_capabilities_flag)) { + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + /* mark driver is loaded in shmem2 */ u32 val; val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], @@ -2486,7 +2721,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } /* Wait for all pending SP commands to complete */ - if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { + if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { BNX2X_ERR("Timeout waiting for SP elements to complete\n"); bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); return -EBUSY; @@ -2502,10 +2737,12 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) #ifndef BNX2X_STOP_ON_ERROR load_error3: - bnx2x_int_disable_sync(bp, 1); + if (IS_PF(bp)) { + bnx2x_int_disable_sync(bp, 1); - /* Clean queueable objects */ - bnx2x_squeeze_objects(bp); + /* Clean queueable objects */ + bnx2x_squeeze_objects(bp); + } /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); @@ -2515,7 +2752,7 @@ load_error3: /* Release IRQs */ bnx2x_free_irq(bp); load_error2: - if (!BP_NOMCP(bp)) { + if (IS_PF(bp) && !BP_NOMCP(bp)) { bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); } @@ -2523,15 +2760,35 @@ load_error2: bp->port.pmf = 0; load_error1: bnx2x_napi_disable(bp); + /* clear pf_load status, as it was already set */ - bnx2x_clear_pf_load(bp); + if (IS_PF(bp)) + bnx2x_clear_pf_load(bp); load_error0: + bnx2x_free_fp_mem(bp); + bnx2x_free_fw_stats_mem(bp); bnx2x_free_mem(bp); return rc; #endif /* ! 
BNX2X_STOP_ON_ERROR */ } +static int bnx2x_drain_tx_queues(struct bnx2x *bp) +{ + u8 rc = 0, cos, i; + + /* Wait until tx fastpath tasks complete */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + for_each_cos_in_tx_queue(fp, cos) + rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); + if (rc) + return rc; + } + return 0; +} + /* must be called with rtnl_lock */ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) { @@ -2541,15 +2798,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); /* mark driver is unloaded in shmem2 */ - if (SHMEM2_HAS(bp, drv_capabilities_flag)) { + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 val; val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); } - if ((bp->state == BNX2X_STATE_CLOSED) || - (bp->state == BNX2X_STATE_ERROR)) { + if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && + (bp->state == BNX2X_STATE_CLOSED || + bp->state == BNX2X_STATE_ERROR)) { /* We can get here if the driver has been unloaded * during parity error recovery and is either waiting for a * leader to complete or for other functions to unload and @@ -2567,8 +2825,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) return -EINVAL; } - /* - * It's important to set the bp->state to the value different from + /* Nothing to do during unload if previous bnx2x_nic_load() + * have not completed succesfully - all resourses are released. + * + * we can get here only after unsuccessful ndo_* callback, during which + * dev->IFF_UP flag is still on. + */ + if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) + return 0; + + /* It's important to set the bp->state to the value different from * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() * may restart the Tx from the NAPI context (see bnx2x_tx_int()). */ @@ -2586,16 +2852,24 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) del_timer_sync(&bp->timer); - /* Set ALWAYS_ALIVE bit in shmem */ - bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; - - bnx2x_drv_pulse(bp); + if (IS_PF(bp)) { + /* Set ALWAYS_ALIVE bit in shmem */ + bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + bnx2x_drv_pulse(bp); + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_save_statistics(bp); + } - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_save_statistics(bp); + /* wait till consumers catch up with producers in all queues */ + bnx2x_drain_tx_queues(bp); - /* Cleanup the chip if needed */ - if (unload_mode != UNLOAD_RECOVERY) + /* if VF indicate to PF this function is going down (PF will delete sp + * elements and clear initializations + */ + if (IS_VF(bp)) + bnx2x_vfpf_close_vf(bp); + else if (unload_mode != UNLOAD_RECOVERY) + /* if this is a normal/close unload need to clean up chip*/ bnx2x_chip_cleanup(bp, unload_mode, keep_link); else { /* Send the UNLOAD_REQUEST to the MCP */ @@ -2628,7 +2902,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) * At this stage no more interrupts will arrive so we may safly clean * the queueable objects here in case they failed to get cleaned so far. 
*/ - bnx2x_squeeze_objects(bp); + if (IS_PF(bp)) + bnx2x_squeeze_objects(bp); /* There should be no more pending SP commands at this stage */ bp->sp_state = 0; @@ -2642,19 +2917,22 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) for_each_rx_queue(bp, i) bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - if (CNIC_LOADED(bp)) { + bnx2x_free_fp_mem(bp); + if (CNIC_LOADED(bp)) bnx2x_free_fp_mem_cnic(bp); - bnx2x_free_mem_cnic(bp); - } - bnx2x_free_mem(bp); + if (IS_PF(bp)) { + bnx2x_free_mem(bp); + if (CNIC_LOADED(bp)) + bnx2x_free_mem_cnic(bp); + } bp->state = BNX2X_STATE_CLOSED; bp->cnic_loaded = false; /* Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS. */ - if (bnx2x_chk_parity_attn(bp, &global, false)) { + if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { bnx2x_set_reset_in_progress(bp); /* Set RESET_IS_GLOBAL if needed */ @@ -2666,7 +2944,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. */ - if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) + if (IS_PF(bp) && + !bnx2x_clear_pf_load(bp) && + bnx2x_reset_is_done(bp, BP_PATH(bp))) bnx2x_disable_close_the_gate(bp); DP(NETIF_MSG_IFUP, "Ending NIC unload\n"); @@ -2750,7 +3030,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget) if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) bnx2x_tx_int(bp, fp->txdata_ptr[cos]); - if (bnx2x_has_rx_work(fp)) { work_done += bnx2x_rx_int(fp, budget - work_done); @@ -2849,17 +3128,21 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, return bd_prod; } -static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) +#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) +#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) +static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) { + __sum16 tsum = (__force __sum16) csum; + if (fix > 0) - csum = (u16) ~csum_fold(csum_sub(csum, - csum_partial(t_header - fix, fix, 0))); + tsum = ~csum_fold(csum_sub((__force __wsum) csum, + csum_partial(t_header - fix, fix, 0))); else if (fix < 0) - csum = (u16) ~csum_fold(csum_add(csum, - csum_partial(t_header, -fix, 0))); + tsum = ~csum_fold(csum_add((__force __wsum) csum, + csum_partial(t_header, -fix, 0))); - return swab16(csum); + return bswab16(csum); } static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) @@ -2993,23 +3276,24 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); + pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); pbd->tcp_flags = pbd_tcp_flags(skb); if (xmit_type & XMIT_GSO_V4) { - pbd->ip_id = swab16(ip_hdr(skb)->id); + pbd->ip_id = bswab16(ip_hdr(skb)->id); pbd->tcp_pseudo_csum = - swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); + bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); } else pbd->tcp_pseudo_csum = - swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); + bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); - pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN; + pbd->global_data |= + cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); } /** @@ -3023,12 
+3307,12 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, * 57712 related */ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, - u32 *parsing_data, u32 xmit_type) + u32 *parsing_data, u32 xmit_type) { *parsing_data |= - ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; + ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; if (xmit_type & XMIT_CSUM_TCP) { *parsing_data |= ((tcp_hdrlen(skb) / 4) << @@ -3036,12 +3320,11 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; - } else - /* We support checksum offload for TCP and UDP only. - * No need to pass the UDP header length - it's a constant. - */ - return skb_transport_header(skb) + - sizeof(struct udphdr) - skb->data; + } + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. + */ + return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; } static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, @@ -3076,8 +3359,9 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, /* for now NS flag is not used in Linux */ pbd->global_data = - (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << - ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); + cpu_to_le16(hlen | + ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); pbd->ip_hlen_w = (skb_transport_header(skb) - skb_network_header(skb)) >> 1; @@ -3094,7 +3378,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, hlen = hlen*2; if (xmit_type & XMIT_CSUM_TCP) { - pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); + pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); } else { s8 fix = SKB_CS_OFF(skb); /* signed! */ @@ -3174,17 +3458,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb(skb); return NETDEV_TX_OK; } - bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; - netif_tx_stop_queue(txq); + bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + netif_tx_stop_queue(txq); BNX2X_ERR("BUG! 
Tx ring full when queue awake!\n"); return NETDEV_TX_BUSY; } DP(NETIF_MSG_TX_QUEUED, - "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n", + "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n", txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, - ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); + ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, + skb->len); eth = (struct ethhdr *)skb->data; @@ -3265,8 +3550,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) cpu_to_le16(vlan_tx_tag_get(skb)); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); - } else - tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } else { + /* when transmitting in a vf, start bd must hold the ethertype + * for fw to enforce it + */ +#ifndef BNX2X_STOP_ON_ERROR + if (IS_VF(bp)) { +#endif + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); +#ifndef BNX2X_STOP_ON_ERROR + } else { + /* used by FW for packet accounting */ + tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } +#endif + } /* turn on parsing and get a BD */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); @@ -3282,9 +3581,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) hlen = bnx2x_set_pbd_csum_e2(bp, skb, &pbd_e2_parsing_data, xmit_type); - if (IS_MF_SI(bp)) { - /* - * fill in the MAC addresses in the PBD - for local + + if (IS_MF_SI(bp) || IS_VF(bp)) { + /* fill in the MAC addresses in the PBD - for local * switching */ bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, @@ -3565,7 +3864,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) return rc; } - dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); if (netif_running(dev)) @@ -3761,6 +4059,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) } else /* if rx_ring_size specified - use it */ rx_ring_size = bp->rx_ring_size; + DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size); + /* Common */ sb = &bnx2x_fp(bp, index, status_blk); @@ -3907,7 +4207,10 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) void bnx2x_free_mem_bp(struct bnx2x *bp) { - kfree(bp->fp->tpa_info); + int i; + + for (i = 0; i < bp->fp_array_size; i++) + kfree(bp->fp[i].tpa_info); kfree(bp->fp); kfree(bp->sp_objs); kfree(bp->fp_stats); @@ -3927,18 +4230,22 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp) /* * The biggest MSI-X table we might need is as a maximum number of fast - * path IGU SBs plus default SB (for PF). + * path IGU SBs plus default SB (for PF only). 
*/ - msix_table_size = bp->igu_sb_cnt + 1; + msix_table_size = bp->igu_sb_cnt; + if (IS_PF(bp)) + msix_table_size++; + BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size); /* fp array: RSS plus CNIC related L2 queues */ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); - BNX2X_DEV_INFO("fp_array_size %d", fp_array_size); + bp->fp_array_size = fp_array_size; + BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); - fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL); + fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); if (!fp) goto alloc_err; - for (i = 0; i < fp_array_size; i++) { + for (i = 0; i < bp->fp_array_size; i++) { fp[i].tpa_info = kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2, sizeof(struct bnx2x_agg_info), GFP_KERNEL); @@ -3949,13 +4256,13 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp) bp->fp = fp; /* allocate sp objs */ - bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs), + bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), GFP_KERNEL); if (!bp->sp_objs) goto alloc_err; /* allocate fp_stats */ - bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats), + bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), GFP_KERNEL); if (!bp->fp_stats) goto alloc_err; @@ -4034,7 +4341,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp) { u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); /* - * The selected actived PHY is always after swapping (in case PHY + * The selected activated PHY is always after swapping (in case PHY * swapping is enabled). So when swapping is enabled, we need to reverse * the configuration */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 0991534f61da..aee7671ff4c1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1,6 +1,6 @@ /* bnx2x_cmn.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -24,6 +24,7 @@ #include "bnx2x.h" +#include "bnx2x_sriov.h" /* This is used as a replacement for an MCP if it's not present */ extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ @@ -196,6 +197,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, /* Disable transactions from chip to host */ void bnx2x_pf_disable(struct bnx2x *bp); +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); /** * bnx2x__link_status_update - handles link status change. @@ -401,7 +403,7 @@ void bnx2x_set_rx_mode(struct net_device *dev); * If bp->state is OPEN, should be called with * netif_addr_lock_bh(). */ -void bnx2x_set_storm_rx_mode(struct bnx2x *bp); +int bnx2x_set_storm_rx_mode(struct bnx2x *bp); /** * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. 
@@ -413,11 +415,11 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp); * @tx_accept_flags: tx accept configuration (tx switch) * @ramrod_flags: ramrod configuration */ -void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, - unsigned long rx_mode_flags, - unsigned long rx_accept_flags, - unsigned long tx_accept_flags, - unsigned long ramrod_flags); +int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags); /* Parity errors related */ void bnx2x_set_pf_load(struct bnx2x *bp); @@ -477,8 +479,6 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); */ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); /* Error handling */ -void bnx2x_panic_dump(struct bnx2x *bp); - void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); /* validate currect fw is loaded */ @@ -496,9 +496,44 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); +int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); + /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); +static inline void bnx2x_update_rx_prod(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, + u16 rx_sge_prod) +{ + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. + */ + wmb(); + + for (i = 0; i < sizeof(rx_prods)/4; i++) + REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, + ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); +} + /* reload helper */ int bnx2x_reload_if_running(struct net_device *dev); @@ -507,9 +542,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p); /* NAPI poll Rx part */ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); -void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); - /* NAPI poll Tx part */ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); @@ -612,38 +644,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; } -static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 bd_prod, - u16 rx_comp_prod, u16 rx_sge_prod, u32 start) -{ - struct ustorm_eth_rx_producers rx_prods = {0}; - u32 i; - - /* Update producers */ - rx_prods.bd_prod = bd_prod; - rx_prods.cqe_prod = rx_comp_prod; - rx_prods.sge_prod = rx_sge_prod; - - /* - * Make sure that the BD and SGE data is updated before updating the - * producers since FW might read the BD/SGE right after the producer - * is updated. - * This is only applicable for weak-ordered memory model archs such - * as IA-64. The following barrier is also mandatory since FW will - * assumes BDs must have buffers. 
- */ - wmb(); - - for (i = 0; i < sizeof(rx_prods)/4; i++) - REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); - - mmiowb(); /* keep prod updates ordered */ - - DP(NETIF_MSG_RX_STATUS, - "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", - fp->index, bd_prod, rx_comp_prod, rx_sge_prod); -} - static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, u8 segment, u16 index, u8 op, u8 update, u32 igu_addr) @@ -819,7 +819,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, return; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + SGE_PAGES, DMA_FROM_DEVICE); __free_pages(page, PAGES_PER_SGE_SHIFT); sw_buf->page = NULL; @@ -863,7 +863,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) netif_napi_del(&bnx2x_fp(bp, i, napi)); } -void bnx2x_set_int_mode(struct bnx2x *bp); +int bnx2x_set_int_mode(struct bnx2x *bp); static inline void bnx2x_disable_msi(struct bnx2x *bp) { @@ -973,7 +973,6 @@ static inline int bnx2x_func_start(struct bnx2x *bp) return bnx2x_func_state_change(bp, &func_params); } - /** * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format * @@ -982,8 +981,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp) * @fw_lo: pointer to lower part * @mac: pointer to MAC address */ -static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, - u8 *mac) +static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, + __le16 *fw_lo, u8 *mac) { ((u8 *)fw_hi)[0] = mac[1]; ((u8 *)fw_hi)[1] = mac[0]; @@ -1108,6 +1107,9 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), bnx2x_get_path_func_num(bp)); + bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1, + bnx2x_get_path_func_num(bp)); + /* RSS configuration object */ bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), @@ -1125,15 +1127,7 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) return fp->cl_id; } -static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) -{ - struct bnx2x *bp = fp->bp; - - if (!CHIP_IS_E1x(bp)) - return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); - else - return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); -} +u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); static inline void bnx2x_init_txdata(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, u32 cid, @@ -1228,7 +1222,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, #endif } cnt--; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } return 0; @@ -1263,7 +1257,7 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) } netif_addr_unlock_bh(bp->dev); - usleep_range(1000, 1000); + usleep_range(1000, 2000); } smp_mb(); @@ -1393,4 +1387,13 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) return false; } +/** + * bnx2x_fill_fw_str - Fill buffer with FW version string + * + * @bp: driver handle + * @buf: character buffer to fill with the fw name + * @buf_len: length of the above buffer + * + */ +void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 10bc093d2ca4..568205436a15 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -1,6 +1,6 @@ /* bnx2x_dcb.c: Broadcom Everest 
network driver. * - * Copyright 2009-2012 Broadcom Corporation + * Copyright 2009-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -416,6 +416,7 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp) int mfw_configured = SHMEM2_HAS(bp, drv_flags) && GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); + if (bp->dcbx_port_params.pfc.enabled && (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured)) /* @@ -558,6 +559,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) int mfw_configured = SHMEM2_HAS(bp, drv_flags) && GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); + bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); if (!bp->dcbx_port_params.ets.enabled || @@ -1904,11 +1906,13 @@ static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state) struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); + /* Fail to set state to "enabled" if dcbx is disabled in nvram */ if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) || (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) { DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n"); return 1; } + bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); return 0; } @@ -2052,7 +2056,6 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) return; - if (setting) { bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio); bp->dcbx_config_params.admin_pfc_tx_enable = 1; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 06c7a0435948..d153f44cf8f9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -1,6 +1,6 @@ /* bnx2x_dcb.h: Broadcom Everest network driver. * - * Copyright 2009-2012 Broadcom Corporation + * Copyright 2009-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index b926f58e983b..bff5e33eaa14 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -1,6 +1,6 @@ /* bnx2x_dump.h: Broadcom Everest network driver. 
* - * Copyright (c) 2012 Broadcom Corporation + * Copyright (c) 2012-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -22,120 +22,37 @@ #ifndef BNX2X_DUMP_H #define BNX2X_DUMP_H +/* WaitP Definitions */ +#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80 +#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80 +#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80 +#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80 -/*definitions */ -#define XSTORM_WAITP_ADDR 0x2b8a80 -#define TSTORM_WAITP_ADDR 0x1b8a80 -#define USTORM_WAITP_ADDR 0x338a80 -#define CSTORM_WAITP_ADDR 0x238a80 -#define TSTORM_CAM_MODE 0x1B1440 +/* Possible Chips */ +#define DUMP_CHIP_E1 1 +#define DUMP_CHIP_E1H 2 +#define DUMP_CHIP_E2 4 +#define DUMP_CHIP_E3A0 8 +#define DUMP_CHIP_E3B0 16 +#define DUMP_PATH_0 512 +#define DUMP_PATH_1 1024 +#define NUM_PRESETS 13 +#define NUM_CHIPS 5 -#define MAX_TIMER_PENDING 200 -#define TIMER_SCAN_DONT_CARE 0xFF -#define RI_E1 0x1 -#define RI_E1H 0x2 -#define RI_E2 0x4 -#define RI_E3 0x8 -#define RI_E3B0 0x10 -#define RI_ONLINE 0x100 -#define RI_OFFLINE 0x0 -#define RI_PATH0_DUMP 0x200 -#define RI_PATH1_DUMP 0x400 - -#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) -#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) -#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) -#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) -#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE) -#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE) -#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) -#define RI_E3_ONLINE (RI_E3 | RI_ONLINE) -#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE) -#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE) -#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE) -#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE) -#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE) -#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE2E3E3B0_ONLINE \ - (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE) -#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE) -#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE) -#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE) -#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE) -#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE) -#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE) -#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE) -#define 
RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE) -#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE) -#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE) -#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE) -#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE) -#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE2E3E3B0_OFFLINE \ - (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE -#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE - -#define DBG_DMP_TRACE_BUFFER_SIZE 0x800 -#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \ - ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE) - -struct dump_sign { - u32 time_stamp; - u32 diag_ver; - u32 grc_dump_ver; -}; - -struct dump_hdr { - u32 hdr_size; /* in dwords, excluding this field */ - struct dump_sign dump_sign; - u32 xstorm_waitp; - u32 tstorm_waitp; - u32 ustorm_waitp; - u32 cstorm_waitp; - u16 info; - u8 idle_chk; - u8 reserved; +struct dump_header { + u32 header_size; /* Size in DWORDs excluding this field */ + u32 version; + u32 preset; + u32 dump_meta_data; /* OR of CHIP and PATH. 
*/ }; +#define BNX2X_DUMP_VERSION 0x50acff01 struct reg_addr { u32 addr; u32 size; - u16 info; + u32 chips; + u32 presets; }; struct wreg_addr { @@ -143,1005 +60,2168 @@ struct wreg_addr { u32 size; u32 read_regs_count; const u32 *read_regs; - u16 info; + u32 chips; + u32 presets; +}; + +#define PAGE_MODE_VALUES_E2 2 +#define PAGE_READ_REGS_E2 1 +#define PAGE_WRITE_REGS_E2 1 +static const u32 page_vals_e2[] = {0, 128}; +static const u32 page_write_regs_e2[] = {328476}; +static const struct reg_addr page_read_regs_e2[] = { + {0x58000, 4608, DUMP_CHIP_E2, 0x30} +}; + +#define PAGE_MODE_VALUES_E3 2 +#define PAGE_READ_REGS_E3 1 +#define PAGE_WRITE_REGS_E3 1 +static const u32 page_vals_e3[] = {0, 128}; +static const u32 page_write_regs_e3[] = {328476}; +static const struct reg_addr page_read_regs_e3[] = { + {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30} }; static const struct reg_addr reg_addrs[] = { - { 0x2000, 341, RI_ALL_ONLINE }, - { 0x2800, 103, RI_ALL_ONLINE }, - { 0x3000, 287, RI_ALL_ONLINE }, - { 0x3800, 331, RI_ALL_ONLINE }, - { 0x8800, 6, RI_ALL_ONLINE }, - { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x9000, 147, RI_E2E3E3B0_ONLINE }, - { 0x924c, 1, RI_E2_ONLINE }, - { 0x9250, 16, RI_E2E3E3B0_ONLINE }, - { 0x9400, 33, RI_E2E3E3B0_ONLINE }, - { 0x9484, 5, RI_E3E3B0_ONLINE }, - { 0xa000, 27, RI_ALL_ONLINE }, - { 0xa06c, 1, RI_E1E1H_ONLINE }, - { 0xa070, 71, RI_ALL_ONLINE }, - { 0xa18c, 4, RI_E1E1H_ONLINE }, - { 0xa19c, 62, RI_ALL_ONLINE }, - { 0xa294, 2, RI_E1E1H_ONLINE }, - { 0xa29c, 2, RI_ALL_ONLINE }, - { 0xa2a4, 2, RI_E1E1HE2_ONLINE }, - { 0xa2ac, 52, RI_ALL_ONLINE }, - { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3b8, 2, RI_E3E3B0_ONLINE }, - { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa400, 40, RI_ALL_ONLINE }, - { 0xa4a0, 1, RI_E1E1HE2_ONLINE }, - { 0xa4a4, 2, RI_ALL_ONLINE }, - { 0xa4ac, 2, RI_E1E1H_ONLINE }, - { 0xa4b4, 1, RI_E1E1HE2_ONLINE }, - { 0xa4b8, 2, RI_E1E1H_ONLINE }, - { 0xa4c0, 3, RI_ALL_ONLINE }, - { 0xa4cc, 5, RI_E1E1H_ONLINE }, - { 0xa4e0, 3, RI_ALL_ONLINE }, - { 0xa4fc, 2, RI_ALL_ONLINE }, - { 0xa504, 1, RI_E1E1H_ONLINE }, - { 0xa508, 3, RI_ALL_ONLINE }, - { 0xa518, 1, RI_ALL_ONLINE }, - { 0xa520, 1, RI_ALL_ONLINE }, - { 0xa528, 1, RI_ALL_ONLINE }, - { 0xa530, 1, RI_ALL_ONLINE }, - { 0xa538, 1, RI_ALL_ONLINE }, - { 0xa540, 1, RI_ALL_ONLINE }, - { 0xa548, 1, RI_E1E1H_ONLINE }, - { 0xa550, 1, RI_E1E1H_ONLINE }, - { 0xa558, 1, RI_E1E1H_ONLINE }, - { 0xa560, 1, RI_E1E1H_ONLINE }, - { 0xa568, 1, RI_E1E1H_ONLINE }, - { 0xa570, 1, RI_ALL_ONLINE }, - { 0xa580, 1, RI_ALL_ONLINE }, - { 0xa590, 1, RI_ALL_ONLINE }, - { 0xa5a0, 1, RI_E1E1HE2_ONLINE }, - { 0xa5c0, 1, RI_ALL_ONLINE }, - { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5f8, 1, RI_E1HE2_ONLINE }, - { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE }, - { 0xa620, 6, RI_E2E3E3B0_ONLINE }, - { 0xa638, 20, RI_E2_ONLINE }, - { 0xa688, 42, RI_E2E3E3B0_ONLINE }, - { 0xa730, 1, RI_E2_ONLINE }, - { 0xa734, 2, RI_E2E3E3B0_ONLINE }, - { 0xa73c, 4, RI_E2_ONLINE }, - { 0xa74c, 5, RI_E2E3E3B0_ONLINE }, - { 0xa760, 5, RI_E2_ONLINE }, - { 0xa774, 7, RI_E2E3E3B0_ONLINE }, - { 0xa790, 15, RI_E2_ONLINE }, - { 0xa7cc, 4, RI_E2E3E3B0_ONLINE }, - { 0xa7e0, 6, RI_E3E3B0_ONLINE }, - { 0xa800, 18, RI_E2_ONLINE }, - { 0xa848, 33, 
RI_E2E3E3B0_ONLINE }, - { 0xa8cc, 2, RI_E3E3B0_ONLINE }, - { 0xa8d4, 4, RI_E2E3E3B0_ONLINE }, - { 0xa8e4, 1, RI_E3E3B0_ONLINE }, - { 0xa8e8, 1, RI_E2E3E3B0_ONLINE }, - { 0xa8f0, 1, RI_E2E3E3B0_ONLINE }, - { 0xa8f8, 30, RI_E3E3B0_ONLINE }, - { 0xa974, 73, RI_E3E3B0_ONLINE }, - { 0xac30, 1, RI_E3E3B0_ONLINE }, - { 0xac40, 1, RI_E3E3B0_ONLINE }, - { 0xac50, 1, RI_E3E3B0_ONLINE }, - { 0xac60, 1, RI_E3B0_ONLINE }, - { 0x10000, 9, RI_ALL_ONLINE }, - { 0x10024, 1, RI_E1E1HE2_ONLINE }, - { 0x10028, 5, RI_ALL_ONLINE }, - { 0x1003c, 6, RI_E1E1HE2_ONLINE }, - { 0x10054, 20, RI_ALL_ONLINE }, - { 0x100a4, 4, RI_E1E1HE2_ONLINE }, - { 0x100b4, 11, RI_ALL_ONLINE }, - { 0x100e0, 4, RI_E1E1HE2_ONLINE }, - { 0x100f0, 8, RI_ALL_ONLINE }, - { 0x10110, 6, RI_E1E1HE2_ONLINE }, - { 0x10128, 110, RI_ALL_ONLINE }, - { 0x102e0, 4, RI_E1E1HE2_ONLINE }, - { 0x102f0, 18, RI_ALL_ONLINE }, - { 0x10338, 20, RI_E1E1HE2_ONLINE }, - { 0x10388, 10, RI_ALL_ONLINE }, - { 0x10400, 6, RI_E1E1HE2_ONLINE }, - { 0x10418, 6, RI_ALL_ONLINE }, - { 0x10430, 10, RI_E1E1HE2_ONLINE }, - { 0x10458, 22, RI_ALL_ONLINE }, - { 0x104b0, 12, RI_E1E1HE2_ONLINE }, - { 0x104e0, 1, RI_ALL_ONLINE }, - { 0x104e8, 2, RI_ALL_ONLINE }, - { 0x104f4, 2, RI_ALL_ONLINE }, - { 0x10500, 146, RI_ALL_ONLINE }, - { 0x10750, 2, RI_E1E1HE2_ONLINE }, - { 0x10760, 2, RI_E1E1HE2_ONLINE }, - { 0x10770, 2, RI_E1E1HE2_ONLINE }, - { 0x10780, 2, RI_E1E1HE2_ONLINE }, - { 0x10790, 2, RI_ALL_ONLINE }, - { 0x107a0, 2, RI_E1E1HE2_ONLINE }, - { 0x107b0, 2, RI_E1E1HE2_ONLINE }, - { 0x107c0, 2, RI_E1E1HE2_ONLINE }, - { 0x107d0, 2, RI_E1E1HE2_ONLINE }, - { 0x107e0, 2, RI_ALL_ONLINE }, - { 0x10880, 2, RI_ALL_ONLINE }, - { 0x10900, 2, RI_ALL_ONLINE }, - { 0x16000, 1, RI_E1HE2_ONLINE }, - { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE }, - { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE }, - { 0x16090, 4, RI_E1HE2E3_ONLINE }, - { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0x160dc, 2, RI_E1HE2_ONLINE }, - { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE }, - { 0x1610c, 2, RI_E1HE2_ONLINE }, - { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE }, - { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x18010, 35, RI_E2E3E3B0_ONLINE }, - { 0x180a4, 2, RI_E2E3E3B0_ONLINE }, - { 0x180c0, 9, RI_E2E3E3B0_ONLINE }, - { 0x180e4, 1, RI_E2E3_ONLINE }, - { 0x180e8, 2, RI_E2E3E3B0_ONLINE }, - { 0x180f0, 1, RI_E2E3_ONLINE }, - { 0x180f4, 79, RI_E2E3E3B0_ONLINE }, - { 0x18230, 1, RI_E2E3_ONLINE }, - { 0x18234, 2, RI_E2E3E3B0_ONLINE }, - { 0x1823c, 1, RI_E2E3_ONLINE }, - { 0x18240, 13, RI_E2E3E3B0_ONLINE }, - { 0x18274, 1, RI_E2_ONLINE }, - { 0x18278, 81, RI_E2E3E3B0_ONLINE }, - { 0x18440, 63, RI_E2E3E3B0_ONLINE }, - { 0x18570, 42, RI_E3E3B0_ONLINE }, - { 0x18618, 25, RI_E3B0_ONLINE }, - { 0x18680, 44, RI_E3B0_ONLINE }, - { 0x18748, 12, RI_E3B0_ONLINE }, - { 0x18788, 1, RI_E3B0_ONLINE }, - { 0x1879c, 6, RI_E3B0_ONLINE }, - { 0x187c4, 51, RI_E3B0_ONLINE }, - { 0x18a00, 48, RI_E3B0_ONLINE }, - { 0x20000, 24, RI_ALL_ONLINE }, - { 0x20060, 8, RI_ALL_ONLINE }, - { 0x20080, 94, RI_ALL_ONLINE }, - { 0x201f8, 1, RI_E1E1H_ONLINE }, - { 0x201fc, 1, RI_ALL_ONLINE }, - { 0x20200, 1, RI_E1E1H_ONLINE }, - { 0x20204, 1, RI_ALL_ONLINE }, - { 0x20208, 1, RI_E1E1H_ONLINE }, - { 0x2020c, 39, RI_ALL_ONLINE }, - { 0x202c8, 1, RI_E2E3E3B0_ONLINE }, - { 0x202d8, 4, RI_E2E3E3B0_ONLINE }, - { 0x202f0, 1, RI_E3B0_ONLINE }, - { 0x20400, 2, RI_ALL_ONLINE }, - { 0x2040c, 8, RI_ALL_ONLINE }, - { 0x2042c, 18, 
RI_E1HE2E3E3B0_ONLINE }, - { 0x20480, 1, RI_ALL_ONLINE }, - { 0x20500, 1, RI_ALL_ONLINE }, - { 0x20600, 1, RI_ALL_ONLINE }, - { 0x28000, 1, RI_ALL_ONLINE }, - { 0x28004, 8191, RI_ALL_OFFLINE }, - { 0x30000, 1, RI_ALL_ONLINE }, - { 0x30004, 16383, RI_ALL_OFFLINE }, - { 0x40000, 98, RI_ALL_ONLINE }, - { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE }, - { 0x401c8, 1, RI_E1H_ONLINE }, - { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x401d4, 2, RI_E2E3E3B0_ONLINE }, - { 0x40200, 4, RI_ALL_ONLINE }, - { 0x40220, 6, RI_E2E3E3B0_ONLINE }, - { 0x40238, 8, RI_E2E3_ONLINE }, - { 0x40258, 4, RI_E2E3E3B0_ONLINE }, - { 0x40268, 2, RI_E3E3B0_ONLINE }, - { 0x40270, 17, RI_E3B0_ONLINE }, - { 0x40400, 43, RI_ALL_ONLINE }, - { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE }, - { 0x404e0, 1, RI_E2E3E3B0_ONLINE }, - { 0x40500, 2, RI_ALL_ONLINE }, - { 0x40510, 2, RI_ALL_ONLINE }, - { 0x40520, 2, RI_ALL_ONLINE }, - { 0x40530, 2, RI_ALL_ONLINE }, - { 0x40540, 2, RI_ALL_ONLINE }, - { 0x40550, 10, RI_E2E3E3B0_ONLINE }, - { 0x40610, 2, RI_E2E3E3B0_ONLINE }, - { 0x42000, 164, RI_ALL_ONLINE }, - { 0x422c0, 4, RI_E2E3E3B0_ONLINE }, - { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x422e8, 1, RI_E2E3E3B0_ONLINE }, - { 0x42400, 49, RI_ALL_ONLINE }, - { 0x424c8, 38, RI_ALL_ONLINE }, - { 0x42568, 2, RI_ALL_ONLINE }, - { 0x42640, 5, RI_E2E3E3B0_ONLINE }, - { 0x42800, 1, RI_ALL_ONLINE }, - { 0x50000, 1, RI_ALL_ONLINE }, - { 0x50004, 19, RI_ALL_ONLINE }, - { 0x50050, 8, RI_ALL_ONLINE }, - { 0x50070, 88, RI_ALL_ONLINE }, - { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE }, - { 0x50200, 2, RI_ALL_ONLINE }, - { 0x5020c, 7, RI_ALL_ONLINE }, - { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x50240, 1, RI_ALL_ONLINE }, - { 0x50280, 1, RI_ALL_ONLINE }, - { 0x50300, 1, RI_E2E3E3B0_ONLINE }, - { 0x5030c, 1, RI_E2E3E3B0_ONLINE }, - { 0x50318, 1, RI_E2E3E3B0_ONLINE }, - { 0x5031c, 1, RI_E2E3E3B0_ONLINE }, - { 0x50320, 2, RI_E2E3E3B0_ONLINE }, - { 0x50330, 1, RI_E3B0_ONLINE }, - { 0x52000, 1, RI_ALL_ONLINE }, - { 0x54000, 1, RI_ALL_ONLINE }, - { 0x54004, 3327, RI_ALL_OFFLINE }, - { 0x58000, 1, RI_ALL_ONLINE }, - { 0x58004, 8191, RI_E1E1H_OFFLINE }, - { 0x60000, 26, RI_ALL_ONLINE }, - { 0x60068, 8, RI_E1E1H_ONLINE }, - { 0x60088, 12, RI_ALL_ONLINE }, - { 0x600b8, 9, RI_E1E1H_ONLINE }, - { 0x600dc, 1, RI_ALL_ONLINE }, - { 0x600e0, 5, RI_E1E1H_ONLINE }, - { 0x600f4, 1, RI_E1E1HE2_ONLINE }, - { 0x600f8, 1, RI_E1E1H_ONLINE }, - { 0x600fc, 8, RI_ALL_ONLINE }, - { 0x6013c, 24, RI_E1H_ONLINE }, - { 0x6019c, 2, RI_E2E3E3B0_ONLINE }, - { 0x601ac, 18, RI_E2E3E3B0_ONLINE }, - { 0x60200, 1, RI_ALL_ONLINE }, - { 0x60204, 2, RI_ALL_OFFLINE }, - { 0x60210, 13, RI_E2E3E3B0_ONLINE }, - { 0x60244, 16, RI_E3B0_ONLINE }, - { 0x61000, 1, RI_ALL_ONLINE }, - { 0x61004, 511, RI_ALL_OFFLINE }, - { 0x61800, 512, RI_E3E3B0_OFFLINE }, - { 0x70000, 8, RI_ALL_ONLINE }, - { 0x70020, 8184, RI_ALL_OFFLINE }, - { 0x78000, 8192, RI_E3E3B0_OFFLINE }, - { 0x85000, 3, RI_ALL_OFFLINE }, - { 0x8501c, 7, RI_ALL_OFFLINE }, - { 0x85048, 1, RI_ALL_OFFLINE }, - { 0x85200, 32, RI_ALL_OFFLINE }, - { 0xb0000, 16384, RI_E1H_OFFLINE }, - { 0xc1000, 7, RI_ALL_ONLINE }, - { 0xc103c, 2, RI_E2E3E3B0_ONLINE }, - { 0xc1800, 2, RI_ALL_ONLINE }, - { 0xc2000, 164, RI_ALL_ONLINE }, - { 0xc22c0, 5, RI_E2E3E3B0_ONLINE }, - { 0xc22d8, 4, RI_E2E3E3B0_ONLINE }, - { 0xc2400, 49, RI_ALL_ONLINE }, - { 0xc24c8, 38, RI_ALL_ONLINE }, - { 0xc2568, 2, RI_ALL_ONLINE }, - { 0xc2600, 1, RI_ALL_ONLINE }, - { 0xc4000, 165, RI_ALL_ONLINE }, - { 0xc42d8, 2, RI_E2E3E3B0_ONLINE }, - { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0xc42fc, 1, RI_E2E3E3B0_ONLINE }, - { 
0xc4400, 51, RI_ALL_ONLINE }, - { 0xc44d0, 38, RI_ALL_ONLINE }, - { 0xc4570, 2, RI_ALL_ONLINE }, - { 0xc4578, 5, RI_E2E3E3B0_ONLINE }, - { 0xc4600, 1, RI_ALL_ONLINE }, - { 0xd0000, 19, RI_ALL_ONLINE }, - { 0xd004c, 8, RI_ALL_ONLINE }, - { 0xd006c, 91, RI_ALL_ONLINE }, - { 0xd01fc, 1, RI_E2E3E3B0_ONLINE }, - { 0xd0200, 2, RI_ALL_ONLINE }, - { 0xd020c, 7, RI_ALL_ONLINE }, - { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE }, - { 0xd0280, 1, RI_ALL_ONLINE }, - { 0xd0300, 1, RI_ALL_ONLINE }, - { 0xd0400, 1, RI_ALL_ONLINE }, - { 0xd0818, 1, RI_E3B0_ONLINE }, - { 0xd4000, 1, RI_ALL_ONLINE }, - { 0xd4004, 2559, RI_ALL_OFFLINE }, - { 0xd8000, 1, RI_ALL_ONLINE }, - { 0xd8004, 8191, RI_ALL_OFFLINE }, - { 0xe0000, 21, RI_ALL_ONLINE }, - { 0xe0054, 8, RI_ALL_ONLINE }, - { 0xe0074, 49, RI_ALL_ONLINE }, - { 0xe0138, 1, RI_E1E1H_ONLINE }, - { 0xe013c, 35, RI_ALL_ONLINE }, - { 0xe01f4, 1, RI_E2_ONLINE }, - { 0xe01f8, 1, RI_E2E3E3B0_ONLINE }, - { 0xe0200, 2, RI_ALL_ONLINE }, - { 0xe020c, 8, RI_ALL_ONLINE }, - { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE }, - { 0xe0280, 1, RI_ALL_ONLINE }, - { 0xe0300, 1, RI_ALL_ONLINE }, - { 0xe0400, 1, RI_E3B0_ONLINE }, - { 0xe1000, 1, RI_ALL_ONLINE }, - { 0xe2000, 1, RI_ALL_ONLINE }, - { 0xe2004, 2047, RI_ALL_OFFLINE }, - { 0xf0000, 1, RI_ALL_ONLINE }, - { 0xf0004, 16383, RI_ALL_OFFLINE }, - { 0x101000, 12, RI_ALL_ONLINE }, - { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x101054, 3, RI_E2E3E3B0_ONLINE }, - { 0x101100, 1, RI_ALL_ONLINE }, - { 0x101800, 8, RI_ALL_ONLINE }, - { 0x102000, 18, RI_ALL_ONLINE }, - { 0x102068, 6, RI_E2E3E3B0_ONLINE }, - { 0x102080, 17, RI_ALL_ONLINE }, - { 0x1020c8, 8, RI_E1H_ONLINE }, - { 0x1020e8, 9, RI_E2E3E3B0_ONLINE }, - { 0x102400, 1, RI_ALL_ONLINE }, - { 0x103000, 26, RI_ALL_ONLINE }, - { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x1030ac, 2, RI_E2E3E3B0_ONLINE }, - { 0x1030b4, 1, RI_E2_ONLINE }, - { 0x1030b8, 7, RI_E2E3E3B0_ONLINE }, - { 0x1030d8, 8, RI_E2E3E3B0_ONLINE }, - { 0x103400, 1, RI_E2E3E3B0_ONLINE }, - { 0x103404, 135, RI_E2E3E3B0_OFFLINE }, - { 0x103800, 8, RI_ALL_ONLINE }, - { 0x104000, 63, RI_ALL_ONLINE }, - { 0x10411c, 16, RI_E2E3E3B0_ONLINE }, - { 0x104200, 17, RI_ALL_ONLINE }, - { 0x104400, 64, RI_ALL_ONLINE }, - { 0x104500, 192, RI_ALL_OFFLINE }, - { 0x104800, 64, RI_ALL_ONLINE }, - { 0x104900, 192, RI_ALL_OFFLINE }, - { 0x105000, 256, RI_ALL_ONLINE }, - { 0x105400, 768, RI_ALL_OFFLINE }, - { 0x107000, 7, RI_E2E3E3B0_ONLINE }, - { 0x10701c, 1, RI_E3E3B0_ONLINE }, - { 0x108000, 33, RI_E1E1H_ONLINE }, - { 0x1080ac, 5, RI_E1H_ONLINE }, - { 0x108100, 5, RI_E1E1H_ONLINE }, - { 0x108120, 5, RI_E1E1H_ONLINE }, - { 0x108200, 74, RI_E1E1H_ONLINE }, - { 0x108400, 74, RI_E1E1H_ONLINE }, - { 0x108800, 152, RI_E1E1H_ONLINE }, - { 0x110000, 111, RI_E2E3E3B0_ONLINE }, - { 0x1101dc, 1, RI_E3E3B0_ONLINE }, - { 0x110200, 4, RI_E2E3E3B0_ONLINE }, - { 0x120000, 2, RI_ALL_ONLINE }, - { 0x120008, 4, RI_ALL_ONLINE }, - { 0x120018, 3, RI_ALL_ONLINE }, - { 0x120024, 4, RI_ALL_ONLINE }, - { 0x120034, 3, RI_ALL_ONLINE }, - { 0x120040, 4, RI_ALL_ONLINE }, - { 0x120050, 3, RI_ALL_ONLINE }, - { 0x12005c, 4, RI_ALL_ONLINE }, - { 0x12006c, 3, RI_ALL_ONLINE }, - { 0x120078, 4, RI_ALL_ONLINE }, - { 0x120088, 3, RI_ALL_ONLINE }, - { 0x120094, 4, RI_ALL_ONLINE }, - { 0x1200a4, 3, RI_ALL_ONLINE }, - { 0x1200b0, 4, RI_ALL_ONLINE }, - { 0x1200c0, 3, RI_ALL_ONLINE }, - { 0x1200cc, 4, RI_ALL_ONLINE }, - { 0x1200dc, 3, RI_ALL_ONLINE }, - { 0x1200e8, 4, RI_ALL_ONLINE }, - { 0x1200f8, 3, RI_ALL_ONLINE }, - { 0x120104, 4, RI_ALL_ONLINE }, - { 0x120114, 1, RI_ALL_ONLINE }, - { 0x120118, 
22, RI_ALL_ONLINE }, - { 0x120170, 2, RI_E1E1H_ONLINE }, - { 0x120178, 243, RI_ALL_ONLINE }, - { 0x120544, 4, RI_E1E1H_ONLINE }, - { 0x120554, 6, RI_ALL_ONLINE }, - { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205f4, 1, RI_E1HE2_ONLINE }, - { 0x1205f8, 4, RI_E2E3E3B0_ONLINE }, - { 0x120618, 1, RI_E2E3E3B0_ONLINE }, - { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE }, - { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE }, - { 0x120698, 3, RI_E2E3E3B0_ONLINE }, - { 0x1206a4, 1, RI_E2_ONLINE }, - { 0x1206a8, 1, RI_E2E3E3B0_ONLINE }, - { 0x1206b0, 75, RI_E2E3E3B0_ONLINE }, - { 0x1207dc, 1, RI_E2_ONLINE }, - { 0x1207fc, 1, RI_E2E3E3B0_ONLINE }, - { 0x12080c, 65, RI_ALL_ONLINE }, - { 0x120910, 7, RI_E2E3E3B0_ONLINE }, - { 0x120930, 9, RI_E2E3E3B0_ONLINE }, - { 0x12095c, 37, RI_E3E3B0_ONLINE }, - { 0x120a00, 2, RI_E1E1HE2_ONLINE }, - { 0x120b00, 1, RI_E3E3B0_ONLINE }, - { 0x122000, 2, RI_ALL_ONLINE }, - { 0x122008, 2046, RI_E1_OFFLINE }, - { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE }, - { 0x130000, 35, RI_E2E3E3B0_ONLINE }, - { 0x130100, 29, RI_E2E3E3B0_ONLINE }, - { 0x130180, 1, RI_E2E3E3B0_ONLINE }, - { 0x130200, 1, RI_E2E3E3B0_ONLINE }, - { 0x130280, 1, RI_E2E3E3B0_ONLINE }, - { 0x130300, 5, RI_E2E3E3B0_ONLINE }, - { 0x130380, 1, RI_E2E3E3B0_ONLINE }, - { 0x130400, 1, RI_E2E3E3B0_ONLINE }, - { 0x130480, 5, RI_E2E3E3B0_ONLINE }, - { 0x130800, 72, RI_E2E3E3B0_ONLINE }, - { 0x131000, 136, RI_E2E3E3B0_ONLINE }, - { 0x132000, 148, RI_E2E3E3B0_ONLINE }, - { 0x134000, 544, RI_E2E3E3B0_ONLINE }, - { 0x140000, 1, RI_ALL_ONLINE }, - { 0x140004, 9, RI_E1E1HE2E3_ONLINE }, - { 0x140028, 8, RI_ALL_ONLINE }, - { 0x140048, 10, RI_E1E1HE2E3_ONLINE }, - { 0x140070, 1, RI_ALL_ONLINE }, - { 0x140074, 10, RI_E1E1HE2E3_ONLINE }, - { 0x14009c, 1, RI_ALL_ONLINE }, - { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE }, - { 0x1400b4, 7, RI_ALL_ONLINE }, - { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE }, - { 0x1400f8, 2, RI_ALL_ONLINE }, - { 0x140100, 5, RI_E1E1H_ONLINE }, - { 0x140114, 5, RI_E1E1HE2E3_ONLINE }, - { 0x140128, 7, RI_ALL_ONLINE }, - { 0x140144, 9, RI_E1E1HE2E3_ONLINE }, - { 0x140168, 8, RI_ALL_ONLINE }, - { 0x140188, 3, RI_E1E1HE2E3_ONLINE }, - { 0x140194, 13, RI_ALL_ONLINE }, - { 0x140200, 6, RI_E1E1HE2E3_ONLINE }, - { 0x140260, 4, RI_E2E3_ONLINE }, - { 0x140280, 4, RI_E2E3_ONLINE }, - { 0x1402e0, 2, RI_E2E3_ONLINE }, - { 0x1402e8, 2, RI_E2E3E3B0_ONLINE }, - { 0x1402f0, 9, RI_E2E3_ONLINE }, - { 0x140314, 44, RI_E3B0_ONLINE }, - { 0x144000, 4, RI_E1E1H_ONLINE }, - { 0x148000, 4, RI_E1E1H_ONLINE }, - { 0x14c000, 4, RI_E1E1H_ONLINE }, - { 0x150000, 4, RI_E1E1H_ONLINE }, - { 0x154000, 4, RI_E1E1H_ONLINE }, - { 0x158000, 4, RI_E1E1H_ONLINE }, - { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x15c008, 5, RI_E1H_ONLINE }, - { 0x15c020, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c040, 1, RI_E2E3_ONLINE }, - { 0x15c044, 2, RI_E2E3E3B0_ONLINE }, - { 0x15c04c, 8, RI_E2E3_ONLINE }, - { 0x15c06c, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c090, 13, RI_E2E3E3B0_ONLINE }, - { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE }, - { 0x15c128, 2, RI_E2E3_ONLINE }, - { 0x15c130, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c150, 2, RI_E3E3B0_ONLINE }, - { 0x15c158, 2, RI_E3_ONLINE }, - { 0x15c160, 149, RI_E3B0_ONLINE }, - { 0x161000, 7, RI_ALL_ONLINE }, - { 0x16103c, 2, RI_E2E3E3B0_ONLINE }, - { 0x161800, 2, RI_ALL_ONLINE }, - { 0x162000, 54, RI_E3E3B0_ONLINE }, - { 0x162200, 60, RI_E3E3B0_ONLINE }, - { 0x162400, 54, RI_E3E3B0_ONLINE }, - { 0x162600, 60, RI_E3E3B0_ONLINE }, - { 0x162800, 54, 
RI_E3E3B0_ONLINE }, - { 0x162a00, 60, RI_E3E3B0_ONLINE }, - { 0x162c00, 54, RI_E3E3B0_ONLINE }, - { 0x162e00, 60, RI_E3E3B0_ONLINE }, - { 0x164000, 60, RI_ALL_ONLINE }, - { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x164118, 15, RI_E2E3E3B0_ONLINE }, - { 0x164200, 1, RI_ALL_ONLINE }, - { 0x164208, 1, RI_ALL_ONLINE }, - { 0x164210, 1, RI_ALL_ONLINE }, - { 0x164218, 1, RI_ALL_ONLINE }, - { 0x164220, 1, RI_ALL_ONLINE }, - { 0x164228, 1, RI_ALL_ONLINE }, - { 0x164230, 1, RI_ALL_ONLINE }, - { 0x164238, 1, RI_ALL_ONLINE }, - { 0x164240, 1, RI_ALL_ONLINE }, - { 0x164248, 1, RI_ALL_ONLINE }, - { 0x164250, 1, RI_ALL_ONLINE }, - { 0x164258, 1, RI_ALL_ONLINE }, - { 0x164260, 1, RI_ALL_ONLINE }, - { 0x164270, 2, RI_ALL_ONLINE }, - { 0x164280, 2, RI_ALL_ONLINE }, - { 0x164800, 2, RI_ALL_ONLINE }, - { 0x165000, 2, RI_ALL_ONLINE }, - { 0x166000, 164, RI_ALL_ONLINE }, - { 0x1662cc, 7, RI_E2E3E3B0_ONLINE }, - { 0x166400, 49, RI_ALL_ONLINE }, - { 0x1664c8, 38, RI_ALL_ONLINE }, - { 0x166568, 2, RI_ALL_ONLINE }, - { 0x166570, 5, RI_E2E3E3B0_ONLINE }, - { 0x166800, 1, RI_ALL_ONLINE }, - { 0x168000, 137, RI_ALL_ONLINE }, - { 0x168224, 2, RI_E1E1H_ONLINE }, - { 0x16822c, 29, RI_ALL_ONLINE }, - { 0x1682a0, 12, RI_E1E1H_ONLINE }, - { 0x1682d0, 12, RI_ALL_ONLINE }, - { 0x168300, 2, RI_E1E1H_ONLINE }, - { 0x168308, 68, RI_ALL_ONLINE }, - { 0x168418, 2, RI_E1E1H_ONLINE }, - { 0x168420, 6, RI_ALL_ONLINE }, - { 0x168800, 19, RI_ALL_ONLINE }, - { 0x168900, 1, RI_ALL_ONLINE }, - { 0x168a00, 128, RI_ALL_ONLINE }, - { 0x16a000, 1, RI_ALL_ONLINE }, - { 0x16a004, 1535, RI_ALL_OFFLINE }, - { 0x16c000, 1, RI_ALL_ONLINE }, - { 0x16c004, 1535, RI_ALL_OFFLINE }, - { 0x16e000, 16, RI_E1H_ONLINE }, - { 0x16e040, 8, RI_E2E3E3B0_ONLINE }, - { 0x16e100, 1, RI_E1H_ONLINE }, - { 0x16e200, 2, RI_E1H_ONLINE }, - { 0x16e400, 161, RI_E1H_ONLINE }, - { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x16e68c, 12, RI_E1H_ONLINE }, - { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE }, - { 0x16e6cc, 4, RI_E1H_ONLINE }, - { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE }, - { 0x16e6e8, 5, RI_E2E3_ONLINE }, - { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE }, - { 0x16e768, 17, RI_E2E3E3B0_ONLINE }, - { 0x16e7ac, 12, RI_E3B0_ONLINE }, - { 0x170000, 24, RI_ALL_ONLINE }, - { 0x170060, 4, RI_E1E1H_ONLINE }, - { 0x170070, 65, RI_ALL_ONLINE }, - { 0x170194, 11, RI_E2E3E3B0_ONLINE }, - { 0x1701c4, 1, RI_E2E3E3B0_ONLINE }, - { 0x1701cc, 7, RI_E2E3E3B0_ONLINE }, - { 0x1701e8, 1, RI_E3E3B0_ONLINE }, - { 0x1701ec, 1, RI_E2E3E3B0_ONLINE }, - { 0x1701f4, 1, RI_E2E3E3B0_ONLINE }, - { 0x170200, 4, RI_ALL_ONLINE }, - { 0x170214, 1, RI_ALL_ONLINE }, - { 0x170218, 77, RI_E2E3E3B0_ONLINE }, - { 0x170400, 64, RI_E2E3E3B0_ONLINE }, - { 0x178000, 1, RI_ALL_ONLINE }, - { 0x180000, 61, RI_ALL_ONLINE }, - { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x180200, 58, RI_ALL_ONLINE }, - { 0x180340, 4, RI_ALL_ONLINE }, - { 0x180380, 1, RI_E2E3E3B0_ONLINE }, - { 0x180388, 1, RI_E2E3E3B0_ONLINE }, - { 0x180390, 1, RI_E2E3E3B0_ONLINE }, - { 0x180398, 1, RI_E2E3E3B0_ONLINE }, - { 0x1803a0, 5, RI_E2E3E3B0_ONLINE }, - { 0x1803b4, 2, RI_E3E3B0_ONLINE }, - { 0x180404, 255, RI_E1E1H_OFFLINE }, - { 0x181000, 4, RI_ALL_ONLINE }, - { 0x181010, 1020, RI_ALL_OFFLINE }, - { 0x182000, 4, RI_E3E3B0_ONLINE }, - { 0x1a0000, 1, RI_ALL_ONLINE }, - { 0x1a0004, 5631, RI_ALL_OFFLINE }, - { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x1a8000, 1, RI_ALL_ONLINE }, - { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x1b0000, 1, RI_ALL_ONLINE }, - { 0x1b0004, 15, RI_E1H_OFFLINE }, - { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b0044, 239, 
RI_E1H_OFFLINE }, - { 0x1b0400, 1, RI_ALL_ONLINE }, - { 0x1b0404, 255, RI_E1H_OFFLINE }, - { 0x1b0800, 1, RI_ALL_ONLINE }, - { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b0c00, 1, RI_ALL_ONLINE }, - { 0x1b1000, 1, RI_ALL_ONLINE }, - { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1400, 1, RI_ALL_ONLINE }, - { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1800, 128, RI_ALL_OFFLINE }, - { 0x1b1c00, 128, RI_ALL_OFFLINE }, - { 0x1b2000, 1, RI_ALL_ONLINE }, - { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x1b8000, 1, RI_ALL_ONLINE }, - { 0x1b8040, 1, RI_ALL_ONLINE }, - { 0x1b8080, 1, RI_ALL_ONLINE }, - { 0x1b80c0, 1, RI_ALL_ONLINE }, - { 0x1b8100, 1, RI_ALL_ONLINE }, - { 0x1b8140, 1, RI_ALL_ONLINE }, - { 0x1b8180, 1, RI_ALL_ONLINE }, - { 0x1b81c0, 1, RI_ALL_ONLINE }, - { 0x1b8200, 1, RI_ALL_ONLINE }, - { 0x1b8240, 1, RI_ALL_ONLINE }, - { 0x1b8280, 1, RI_ALL_ONLINE }, - { 0x1b82c0, 1, RI_ALL_ONLINE }, - { 0x1b8300, 1, RI_ALL_ONLINE }, - { 0x1b8340, 1, RI_ALL_ONLINE }, - { 0x1b8380, 1, RI_ALL_ONLINE }, - { 0x1b83c0, 1, RI_ALL_ONLINE }, - { 0x1b8400, 1, RI_ALL_ONLINE }, - { 0x1b8440, 1, RI_ALL_ONLINE }, - { 0x1b8480, 1, RI_ALL_ONLINE }, - { 0x1b84c0, 1, RI_ALL_ONLINE }, - { 0x1b8500, 1, RI_ALL_ONLINE }, - { 0x1b8540, 1, RI_ALL_ONLINE }, - { 0x1b8580, 1, RI_ALL_ONLINE }, - { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x1b8800, 1, RI_ALL_ONLINE }, - { 0x1b8840, 1, RI_ALL_ONLINE }, - { 0x1b8880, 1, RI_ALL_ONLINE }, - { 0x1b88c0, 1, RI_ALL_ONLINE }, - { 0x1b8900, 1, RI_ALL_ONLINE }, - { 0x1b8940, 1, RI_ALL_ONLINE }, - { 0x1b8980, 1, RI_ALL_ONLINE }, - { 0x1b89c0, 1, RI_ALL_ONLINE }, - { 0x1b8a00, 1, RI_ALL_ONLINE }, - { 0x1b8a40, 1, RI_ALL_ONLINE }, - { 0x1b8a80, 1, RI_ALL_ONLINE }, - { 0x1b8ac0, 1, RI_ALL_ONLINE }, - { 0x1b8b00, 1, RI_ALL_ONLINE }, - { 0x1b8b40, 1, RI_ALL_ONLINE }, - { 0x1b8b80, 1, RI_ALL_ONLINE }, - { 0x1b8bc0, 1, RI_ALL_ONLINE }, - { 0x1b8c00, 1, RI_ALL_ONLINE }, - { 0x1b8c40, 1, RI_ALL_ONLINE }, - { 0x1b8c80, 1, RI_ALL_ONLINE }, - { 0x1b8cc0, 1, RI_ALL_ONLINE }, - { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b8d00, 1, RI_ALL_ONLINE }, - { 0x1b8d40, 1, RI_ALL_ONLINE }, - { 0x1b8d80, 1, RI_ALL_ONLINE }, - { 0x1b8dc0, 1, RI_ALL_ONLINE }, - { 0x1b8e00, 1, RI_ALL_ONLINE }, - { 0x1b8e40, 1, RI_ALL_ONLINE }, - { 0x1b8e80, 1, RI_ALL_ONLINE }, - { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x1b8fe8, 2, RI_E3E3B0_ONLINE }, - { 0x1b9000, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b9040, 3, RI_E2E3E3B0_ONLINE }, - { 0x1b905c, 1, RI_E3E3B0_ONLINE }, - { 0x1b9064, 1, RI_E3B0_ONLINE }, - { 0x1b9080, 10, RI_E3B0_ONLINE }, - { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE }, - { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE }, - { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE }, - { 0x1c0000, 2, RI_ALL_ONLINE }, - { 0x200000, 65, RI_ALL_ONLINE }, - { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x200200, 58, RI_ALL_ONLINE }, - { 0x200340, 4, RI_ALL_ONLINE }, - { 0x200380, 1, RI_E2E3E3B0_ONLINE }, - { 0x200388, 1, RI_E2E3E3B0_ONLINE }, - { 0x200390, 1, RI_E2E3E3B0_ONLINE }, - { 0x200398, 1, RI_E2E3E3B0_ONLINE }, - { 0x2003a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x2003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x200404, 255, RI_E1E1H_OFFLINE }, - { 0x202000, 4, 
RI_ALL_ONLINE }, - { 0x202010, 2044, RI_ALL_OFFLINE }, - { 0x204000, 4, RI_E3E3B0_ONLINE }, - { 0x220000, 1, RI_ALL_ONLINE }, - { 0x220004, 5631, RI_ALL_OFFLINE }, - { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x228000, 1, RI_ALL_ONLINE }, - { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x230000, 1, RI_ALL_ONLINE }, - { 0x230004, 15, RI_E1H_OFFLINE }, - { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x230044, 239, RI_E1H_OFFLINE }, - { 0x230400, 1, RI_ALL_ONLINE }, - { 0x230404, 255, RI_E1H_OFFLINE }, - { 0x230800, 1, RI_ALL_ONLINE }, - { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x230c00, 1, RI_ALL_ONLINE }, - { 0x231000, 1, RI_ALL_ONLINE }, - { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231400, 1, RI_ALL_ONLINE }, - { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231800, 128, RI_ALL_OFFLINE }, - { 0x231c00, 128, RI_ALL_OFFLINE }, - { 0x232000, 1, RI_ALL_ONLINE }, - { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x232404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x238000, 1, RI_ALL_ONLINE }, - { 0x238040, 1, RI_ALL_ONLINE }, - { 0x238080, 1, RI_ALL_ONLINE }, - { 0x2380c0, 1, RI_ALL_ONLINE }, - { 0x238100, 1, RI_ALL_ONLINE }, - { 0x238140, 1, RI_ALL_ONLINE }, - { 0x238180, 1, RI_ALL_ONLINE }, - { 0x2381c0, 1, RI_ALL_ONLINE }, - { 0x238200, 1, RI_ALL_ONLINE }, - { 0x238240, 1, RI_ALL_ONLINE }, - { 0x238280, 1, RI_ALL_ONLINE }, - { 0x2382c0, 1, RI_ALL_ONLINE }, - { 0x238300, 1, RI_ALL_ONLINE }, - { 0x238340, 1, RI_ALL_ONLINE }, - { 0x238380, 1, RI_ALL_ONLINE }, - { 0x2383c0, 1, RI_ALL_ONLINE }, - { 0x238400, 1, RI_ALL_ONLINE }, - { 0x238440, 1, RI_ALL_ONLINE }, - { 0x238480, 1, RI_ALL_ONLINE }, - { 0x2384c0, 1, RI_ALL_ONLINE }, - { 0x238500, 1, RI_ALL_ONLINE }, - { 0x238540, 1, RI_ALL_ONLINE }, - { 0x238580, 1, RI_ALL_ONLINE }, - { 0x2385c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x238800, 1, RI_ALL_ONLINE }, - { 0x238840, 1, RI_ALL_ONLINE }, - { 0x238880, 1, RI_ALL_ONLINE }, - { 0x2388c0, 1, RI_ALL_ONLINE }, - { 0x238900, 1, RI_ALL_ONLINE }, - { 0x238940, 1, RI_ALL_ONLINE }, - { 0x238980, 1, RI_ALL_ONLINE }, - { 0x2389c0, 1, RI_ALL_ONLINE }, - { 0x238a00, 1, RI_ALL_ONLINE }, - { 0x238a40, 1, RI_ALL_ONLINE }, - { 0x238a80, 1, RI_ALL_ONLINE }, - { 0x238ac0, 1, RI_ALL_ONLINE }, - { 0x238b00, 1, RI_ALL_ONLINE }, - { 0x238b40, 1, RI_ALL_ONLINE }, - { 0x238b80, 1, RI_ALL_ONLINE }, - { 0x238bc0, 1, RI_ALL_ONLINE }, - { 0x238c00, 1, RI_ALL_ONLINE }, - { 0x238c40, 1, RI_ALL_ONLINE }, - { 0x238c80, 1, RI_ALL_ONLINE }, - { 0x238cc0, 1, RI_ALL_ONLINE }, - { 0x238cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x238d00, 1, RI_ALL_ONLINE }, - { 0x238d40, 1, RI_ALL_ONLINE }, - { 0x238d80, 1, RI_ALL_ONLINE }, - { 0x238dc0, 1, RI_ALL_ONLINE }, - { 0x238e00, 1, RI_ALL_ONLINE }, - { 0x238e40, 1, RI_ALL_ONLINE }, - { 0x238e80, 1, RI_ALL_ONLINE }, - { 0x238e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x238fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x238fe8, 2, RI_E3E3B0_ONLINE }, - { 0x239000, 1, RI_E2E3E3B0_ONLINE }, - { 0x239040, 3, RI_E2E3E3B0_ONLINE }, - { 0x23905c, 1, RI_E3E3B0_ONLINE }, - { 0x239064, 1, RI_E3B0_ONLINE }, - { 0x239080, 10, RI_E3B0_ONLINE }, - { 0x240000, 2, RI_ALL_ONLINE }, - { 0x280000, 65, RI_ALL_ONLINE }, - { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x280200, 58, RI_ALL_ONLINE }, - { 0x280340, 4, RI_ALL_ONLINE 
}, - { 0x280380, 1, RI_E2E3E3B0_ONLINE }, - { 0x280388, 1, RI_E2E3E3B0_ONLINE }, - { 0x280390, 1, RI_E2E3E3B0_ONLINE }, - { 0x280398, 1, RI_E2E3E3B0_ONLINE }, - { 0x2803a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x2803a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x280404, 255, RI_E1E1H_OFFLINE }, - { 0x282000, 4, RI_ALL_ONLINE }, - { 0x282010, 2044, RI_ALL_OFFLINE }, - { 0x284000, 4, RI_E3E3B0_ONLINE }, - { 0x2a0000, 1, RI_ALL_ONLINE }, - { 0x2a0004, 5631, RI_ALL_OFFLINE }, - { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x2a8000, 1, RI_ALL_ONLINE }, - { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x2b0000, 1, RI_ALL_ONLINE }, - { 0x2b0004, 15, RI_E1H_OFFLINE }, - { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b0044, 239, RI_E1H_OFFLINE }, - { 0x2b0400, 1, RI_ALL_ONLINE }, - { 0x2b0404, 255, RI_E1H_OFFLINE }, - { 0x2b0800, 1, RI_ALL_ONLINE }, - { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b0c00, 1, RI_ALL_ONLINE }, - { 0x2b1000, 1, RI_ALL_ONLINE }, - { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1400, 1, RI_ALL_ONLINE }, - { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1800, 128, RI_ALL_OFFLINE }, - { 0x2b1c00, 128, RI_ALL_OFFLINE }, - { 0x2b2000, 1, RI_ALL_ONLINE }, - { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x2b8000, 1, RI_ALL_ONLINE }, - { 0x2b8040, 1, RI_ALL_ONLINE }, - { 0x2b8080, 1, RI_ALL_ONLINE }, - { 0x2b80c0, 1, RI_ALL_ONLINE }, - { 0x2b8100, 1, RI_ALL_ONLINE }, - { 0x2b8140, 1, RI_ALL_ONLINE }, - { 0x2b8180, 1, RI_ALL_ONLINE }, - { 0x2b81c0, 1, RI_ALL_ONLINE }, - { 0x2b8200, 1, RI_ALL_ONLINE }, - { 0x2b8240, 1, RI_ALL_ONLINE }, - { 0x2b8280, 1, RI_ALL_ONLINE }, - { 0x2b82c0, 1, RI_ALL_ONLINE }, - { 0x2b8300, 1, RI_ALL_ONLINE }, - { 0x2b8340, 1, RI_ALL_ONLINE }, - { 0x2b8380, 1, RI_ALL_ONLINE }, - { 0x2b83c0, 1, RI_ALL_ONLINE }, - { 0x2b8400, 1, RI_ALL_ONLINE }, - { 0x2b8440, 1, RI_ALL_ONLINE }, - { 0x2b8480, 1, RI_ALL_ONLINE }, - { 0x2b84c0, 1, RI_ALL_ONLINE }, - { 0x2b8500, 1, RI_ALL_ONLINE }, - { 0x2b8540, 1, RI_ALL_ONLINE }, - { 0x2b8580, 1, RI_ALL_ONLINE }, - { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x2b8800, 1, RI_ALL_ONLINE }, - { 0x2b8840, 1, RI_ALL_ONLINE }, - { 0x2b8880, 1, RI_ALL_ONLINE }, - { 0x2b88c0, 1, RI_ALL_ONLINE }, - { 0x2b8900, 1, RI_ALL_ONLINE }, - { 0x2b8940, 1, RI_ALL_ONLINE }, - { 0x2b8980, 1, RI_ALL_ONLINE }, - { 0x2b89c0, 1, RI_ALL_ONLINE }, - { 0x2b8a00, 1, RI_ALL_ONLINE }, - { 0x2b8a40, 1, RI_ALL_ONLINE }, - { 0x2b8a80, 1, RI_ALL_ONLINE }, - { 0x2b8ac0, 1, RI_ALL_ONLINE }, - { 0x2b8b00, 1, RI_ALL_ONLINE }, - { 0x2b8b40, 1, RI_ALL_ONLINE }, - { 0x2b8b80, 1, RI_ALL_ONLINE }, - { 0x2b8bc0, 1, RI_ALL_ONLINE }, - { 0x2b8c00, 1, RI_ALL_ONLINE }, - { 0x2b8c40, 1, RI_ALL_ONLINE }, - { 0x2b8c80, 1, RI_ALL_ONLINE }, - { 0x2b8cc0, 1, RI_ALL_ONLINE }, - { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b8d00, 1, RI_ALL_ONLINE }, - { 0x2b8d40, 1, RI_ALL_ONLINE }, - { 0x2b8d80, 1, RI_ALL_ONLINE }, - { 0x2b8dc0, 1, RI_ALL_ONLINE }, - { 0x2b8e00, 1, RI_ALL_ONLINE }, - { 0x2b8e40, 1, RI_ALL_ONLINE }, - { 0x2b8e80, 1, RI_ALL_ONLINE }, - { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x2b8fe8, 2, RI_E3E3B0_ONLINE }, - { 0x2b9000, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b9040, 3, 
RI_E2E3E3B0_ONLINE }, - { 0x2b905c, 1, RI_E3E3B0_ONLINE }, - { 0x2b9064, 1, RI_E3B0_ONLINE }, - { 0x2b9080, 10, RI_E3B0_ONLINE }, - { 0x2b9400, 14, RI_E2E3E3B0_ONLINE }, - { 0x2b943c, 19, RI_E2E3E3B0_ONLINE }, - { 0x2b9490, 10, RI_E2E3E3B0_ONLINE }, - { 0x2c0000, 2, RI_ALL_ONLINE }, - { 0x300000, 65, RI_ALL_ONLINE }, - { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x300200, 58, RI_ALL_ONLINE }, - { 0x300340, 4, RI_ALL_ONLINE }, - { 0x300380, 1, RI_E2E3E3B0_ONLINE }, - { 0x300388, 1, RI_E2E3E3B0_ONLINE }, - { 0x300390, 1, RI_E2E3E3B0_ONLINE }, - { 0x300398, 1, RI_E2E3E3B0_ONLINE }, - { 0x3003a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x3003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x300404, 255, RI_E1E1H_OFFLINE }, - { 0x302000, 4, RI_ALL_ONLINE }, - { 0x302010, 2044, RI_ALL_OFFLINE }, - { 0x304000, 4, RI_E3E3B0_ONLINE }, - { 0x320000, 1, RI_ALL_ONLINE }, - { 0x320004, 5631, RI_ALL_OFFLINE }, - { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x328000, 1, RI_ALL_ONLINE }, - { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x330000, 1, RI_ALL_ONLINE }, - { 0x330004, 15, RI_E1H_OFFLINE }, - { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x330044, 239, RI_E1H_OFFLINE }, - { 0x330400, 1, RI_ALL_ONLINE }, - { 0x330404, 255, RI_E1H_OFFLINE }, - { 0x330800, 1, RI_ALL_ONLINE }, - { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x330c00, 1, RI_ALL_ONLINE }, - { 0x331000, 1, RI_ALL_ONLINE }, - { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331400, 1, RI_ALL_ONLINE }, - { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331800, 128, RI_ALL_OFFLINE }, - { 0x331c00, 128, RI_ALL_OFFLINE }, - { 0x332000, 1, RI_ALL_ONLINE }, - { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x332404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x338000, 1, RI_ALL_ONLINE }, - { 0x338040, 1, RI_ALL_ONLINE }, - { 0x338080, 1, RI_ALL_ONLINE }, - { 0x3380c0, 1, RI_ALL_ONLINE }, - { 0x338100, 1, RI_ALL_ONLINE }, - { 0x338140, 1, RI_ALL_ONLINE }, - { 0x338180, 1, RI_ALL_ONLINE }, - { 0x3381c0, 1, RI_ALL_ONLINE }, - { 0x338200, 1, RI_ALL_ONLINE }, - { 0x338240, 1, RI_ALL_ONLINE }, - { 0x338280, 1, RI_ALL_ONLINE }, - { 0x3382c0, 1, RI_ALL_ONLINE }, - { 0x338300, 1, RI_ALL_ONLINE }, - { 0x338340, 1, RI_ALL_ONLINE }, - { 0x338380, 1, RI_ALL_ONLINE }, - { 0x3383c0, 1, RI_ALL_ONLINE }, - { 0x338400, 1, RI_ALL_ONLINE }, - { 0x338440, 1, RI_ALL_ONLINE }, - { 0x338480, 1, RI_ALL_ONLINE }, - { 0x3384c0, 1, RI_ALL_ONLINE }, - { 0x338500, 1, RI_ALL_ONLINE }, - { 0x338540, 1, RI_ALL_ONLINE }, - { 0x338580, 1, RI_ALL_ONLINE }, - { 0x3385c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x338800, 1, RI_ALL_ONLINE }, - { 0x338840, 1, RI_ALL_ONLINE }, - { 0x338880, 1, RI_ALL_ONLINE }, - { 0x3388c0, 1, RI_ALL_ONLINE }, - { 0x338900, 1, RI_ALL_ONLINE }, - { 0x338940, 1, RI_ALL_ONLINE }, - { 0x338980, 1, RI_ALL_ONLINE }, - { 0x3389c0, 1, RI_ALL_ONLINE }, - { 0x338a00, 1, RI_ALL_ONLINE }, - { 0x338a40, 1, RI_ALL_ONLINE }, - { 0x338a80, 1, RI_ALL_ONLINE }, - { 0x338ac0, 1, RI_ALL_ONLINE }, - { 0x338b00, 1, RI_ALL_ONLINE }, - { 0x338b40, 1, RI_ALL_ONLINE }, - { 0x338b80, 1, RI_ALL_ONLINE }, - { 0x338bc0, 1, RI_ALL_ONLINE }, - { 0x338c00, 1, RI_ALL_ONLINE }, - { 0x338c40, 1, RI_ALL_ONLINE }, - { 0x338c80, 1, RI_ALL_ONLINE }, - { 0x338cc0, 1, RI_ALL_ONLINE }, - { 0x338cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x338d00, 1, RI_ALL_ONLINE }, - { 0x338d40, 1, RI_ALL_ONLINE }, - { 0x338d80, 1, RI_ALL_ONLINE }, - { 0x338dc0, 1, RI_ALL_ONLINE }, - { 0x338e00, 1, RI_ALL_ONLINE }, - { 0x338e40, 1, RI_ALL_ONLINE }, - { 0x338e80, 1, RI_ALL_ONLINE }, - { 
0x338e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x338fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x338fe8, 2, RI_E3E3B0_ONLINE }, - { 0x339000, 1, RI_E2E3E3B0_ONLINE }, - { 0x339040, 3, RI_E2E3E3B0_ONLINE }, - { 0x33905c, 1, RI_E3E3B0_ONLINE }, - { 0x339064, 1, RI_E3B0_ONLINE }, - { 0x339080, 10, RI_E3B0_ONLINE }, - { 0x340000, 2, RI_ALL_ONLINE }, + { 0x2000, 1, 0x1f, 0xfff}, + { 0x2004, 1, 0x1f, 0x1fff}, + { 0x2008, 25, 0x1f, 0xfff}, + { 0x206c, 1, 0x1f, 0x1fff}, + { 0x2070, 313, 0x1f, 0xfff}, + { 0x2800, 103, 0x1f, 0xfff}, + { 0x3000, 287, 0x1f, 0xfff}, + { 0x3800, 331, 0x1f, 0xfff}, + { 0x8800, 6, 0x1f, 0x924}, + { 0x8818, 1, 0x1e, 0x924}, + { 0x9000, 4, 0x1c, 0x924}, + { 0x9010, 7, 0x1c, 0xfff}, + { 0x902c, 1, 0x1c, 0x924}, + { 0x9030, 1, 0x1c, 0xfff}, + { 0x9034, 13, 0x1c, 0x924}, + { 0x9068, 16, 0x1c, 0xfff}, + { 0x90a8, 98, 0x1c, 0x924}, + { 0x9230, 2, 0x1c, 0xfff}, + { 0x9238, 3, 0x1c, 0x924}, + { 0x9244, 1, 0x1c, 0xfff}, + { 0x9248, 1, 0x1c, 0x924}, + { 0x924c, 1, 0x4, 0x924}, + { 0x9250, 16, 0x1c, 0x924}, + { 0x92a8, 2, 0x1c, 0x1fff}, + { 0x92b4, 1, 0x1c, 0x1fff}, + { 0x9400, 33, 0x1c, 0x924}, + { 0x9484, 5, 0x18, 0x924}, + { 0xa000, 27, 0x1f, 0x924}, + { 0xa06c, 1, 0x3, 0x924}, + { 0xa070, 2, 0x1f, 0x924}, + { 0xa078, 1, 0x1f, 0x1fff}, + { 0xa07c, 31, 0x1f, 0x924}, + { 0xa0f8, 1, 0x1f, 0x1fff}, + { 0xa0fc, 3, 0x1f, 0x924}, + { 0xa108, 1, 0x1f, 0x1fff}, + { 0xa10c, 3, 0x1f, 0x924}, + { 0xa118, 1, 0x1f, 0x1fff}, + { 0xa11c, 28, 0x1f, 0x924}, + { 0xa18c, 4, 0x3, 0x924}, + { 0xa19c, 3, 0x1f, 0x924}, + { 0xa1a8, 1, 0x1f, 0x1fff}, + { 0xa1ac, 3, 0x1f, 0x924}, + { 0xa1b8, 1, 0x1f, 0x1fff}, + { 0xa1bc, 54, 0x1f, 0x924}, + { 0xa294, 2, 0x3, 0x924}, + { 0xa29c, 2, 0x1f, 0x924}, + { 0xa2a4, 2, 0x7, 0x924}, + { 0xa2ac, 2, 0x1f, 0x924}, + { 0xa2b4, 1, 0x1f, 0x1fff}, + { 0xa2b8, 49, 0x1f, 0x924}, + { 0xa38c, 2, 0x1f, 0x1fff}, + { 0xa398, 1, 0x1f, 0x1fff}, + { 0xa39c, 7, 0x1e, 0x924}, + { 0xa3b8, 2, 0x18, 0x924}, + { 0xa3c0, 1, 0x1e, 0x924}, + { 0xa3c4, 1, 0x1e, 0xfff}, + { 0xa3c8, 1, 0x1e, 0x924}, + { 0xa3d0, 1, 0x1e, 0x924}, + { 0xa3d8, 1, 0x1e, 0x924}, + { 0xa3e0, 1, 0x1e, 0x924}, + { 0xa3e8, 1, 0x1e, 0x924}, + { 0xa3f0, 1, 0x1e, 0x924}, + { 0xa3f8, 1, 0x1e, 0x924}, + { 0xa400, 1, 0x1f, 0x924}, + { 0xa404, 1, 0x1f, 0xfff}, + { 0xa408, 2, 0x1f, 0x1fff}, + { 0xa410, 7, 0x1f, 0x924}, + { 0xa42c, 12, 0x1f, 0xfff}, + { 0xa45c, 1, 0x1f, 0x924}, + { 0xa460, 1, 0x1f, 0x1924}, + { 0xa464, 15, 0x1f, 0x924}, + { 0xa4a0, 1, 0x7, 0x924}, + { 0xa4a4, 2, 0x1f, 0x924}, + { 0xa4ac, 2, 0x3, 0x924}, + { 0xa4b4, 1, 0x7, 0x924}, + { 0xa4b8, 2, 0x3, 0x924}, + { 0xa4c0, 3, 0x1f, 0x924}, + { 0xa4cc, 5, 0x3, 0x924}, + { 0xa4e0, 3, 0x1f, 0x924}, + { 0xa4fc, 2, 0x1f, 0x924}, + { 0xa504, 1, 0x3, 0x924}, + { 0xa508, 3, 0x1f, 0x924}, + { 0xa518, 1, 0x1f, 0x924}, + { 0xa520, 1, 0x1f, 0x924}, + { 0xa528, 1, 0x1f, 0x924}, + { 0xa530, 1, 0x1f, 0x924}, + { 0xa538, 1, 0x1f, 0x924}, + { 0xa540, 1, 0x1f, 0x924}, + { 0xa548, 1, 0x3, 0x924}, + { 0xa550, 1, 0x3, 0x924}, + { 0xa558, 1, 0x3, 0x924}, + { 0xa560, 1, 0x3, 0x924}, + { 0xa568, 1, 0x3, 0x924}, + { 0xa570, 1, 0x1f, 0x924}, + { 0xa580, 1, 0x1f, 0x1fff}, + { 0xa590, 1, 0x1f, 0x1fff}, + { 0xa5a0, 1, 0x7, 0x924}, + { 0xa5c0, 1, 0x1f, 0x924}, + { 0xa5e0, 1, 0x1e, 0x924}, + { 0xa5e8, 1, 0x1e, 0x924}, + { 0xa5f0, 1, 0x1e, 0x924}, + { 0xa5f8, 1, 0x6, 
0x924}, + { 0xa5fc, 1, 0x1e, 0x924}, + { 0xa600, 5, 0x1e, 0xfff}, + { 0xa614, 1, 0x1e, 0x924}, + { 0xa618, 1, 0x1e, 0xfff}, + { 0xa61c, 1, 0x1e, 0x924}, + { 0xa620, 6, 0x1c, 0x924}, + { 0xa638, 20, 0x4, 0x924}, + { 0xa688, 35, 0x1c, 0x924}, + { 0xa714, 1, 0x1c, 0xfff}, + { 0xa718, 2, 0x1c, 0x924}, + { 0xa720, 1, 0x1c, 0xfff}, + { 0xa724, 3, 0x1c, 0x924}, + { 0xa730, 1, 0x4, 0x924}, + { 0xa734, 2, 0x1c, 0x924}, + { 0xa73c, 4, 0x4, 0x924}, + { 0xa74c, 1, 0x1c, 0x924}, + { 0xa750, 1, 0x1c, 0xfff}, + { 0xa754, 3, 0x1c, 0x924}, + { 0xa760, 5, 0x4, 0x924}, + { 0xa774, 7, 0x1c, 0x924}, + { 0xa790, 15, 0x4, 0x924}, + { 0xa7cc, 4, 0x1c, 0x924}, + { 0xa7e0, 6, 0x18, 0x924}, + { 0xa800, 18, 0x4, 0x924}, + { 0xa848, 33, 0x1c, 0x924}, + { 0xa8cc, 2, 0x18, 0x924}, + { 0xa8d4, 4, 0x1c, 0x924}, + { 0xa8e4, 1, 0x18, 0x924}, + { 0xa8e8, 1, 0x1c, 0x924}, + { 0xa8f0, 1, 0x1c, 0x924}, + { 0xa8f8, 30, 0x18, 0x924}, + { 0xa974, 73, 0x18, 0x924}, + { 0xac30, 1, 0x18, 0x924}, + { 0xac40, 1, 0x18, 0x924}, + { 0xac50, 1, 0x18, 0x924}, + { 0xac60, 1, 0x10, 0x924}, + { 0x10000, 9, 0x1f, 0x924}, + { 0x10024, 1, 0x7, 0x924}, + { 0x10028, 5, 0x1f, 0x924}, + { 0x1003c, 6, 0x7, 0x924}, + { 0x10054, 20, 0x1f, 0x924}, + { 0x100a4, 4, 0x7, 0x924}, + { 0x100b4, 11, 0x1f, 0x924}, + { 0x100e0, 4, 0x7, 0x924}, + { 0x100f0, 8, 0x1f, 0x924}, + { 0x10110, 6, 0x7, 0x924}, + { 0x10128, 110, 0x1f, 0x924}, + { 0x102e0, 4, 0x7, 0x924}, + { 0x102f0, 18, 0x1f, 0x924}, + { 0x10338, 20, 0x7, 0x924}, + { 0x10388, 10, 0x1f, 0x924}, + { 0x103d0, 2, 0x3, 0x1fff}, + { 0x103dc, 1, 0x3, 0x1fff}, + { 0x10400, 6, 0x7, 0x924}, + { 0x10418, 1, 0x1f, 0xfff}, + { 0x1041c, 1, 0x1f, 0x924}, + { 0x10420, 1, 0x1f, 0xfff}, + { 0x10424, 1, 0x1f, 0x924}, + { 0x10428, 1, 0x1f, 0xfff}, + { 0x1042c, 1, 0x1f, 0x924}, + { 0x10430, 10, 0x7, 0x924}, + { 0x10458, 2, 0x1f, 0x924}, + { 0x10460, 1, 0x1f, 0xfff}, + { 0x10464, 4, 0x1f, 0x924}, + { 0x10474, 1, 0x1f, 0xfff}, + { 0x10478, 14, 0x1f, 0x924}, + { 0x104b0, 12, 0x7, 0x924}, + { 0x104e0, 1, 0x1f, 0xfff}, + { 0x104e8, 1, 0x1f, 0x924}, + { 0x104ec, 1, 0x1f, 0xfff}, + { 0x104f4, 1, 0x1f, 0x924}, + { 0x104f8, 1, 0x1f, 0xfff}, + { 0x10500, 2, 0x1f, 0x924}, + { 0x10508, 1, 0x1f, 0xfff}, + { 0x1050c, 9, 0x1f, 0x924}, + { 0x10530, 1, 0x1f, 0xfff}, + { 0x10534, 1, 0x1f, 0x924}, + { 0x10538, 1, 0x1f, 0xfff}, + { 0x1053c, 3, 0x1f, 0x924}, + { 0x10548, 1, 0x1f, 0xfff}, + { 0x1054c, 3, 0x1f, 0x924}, + { 0x10558, 1, 0x1f, 0xfff}, + { 0x1055c, 123, 0x1f, 0x924}, + { 0x10750, 2, 0x7, 0x924}, + { 0x10760, 2, 0x7, 0x924}, + { 0x10770, 2, 0x7, 0x924}, + { 0x10780, 2, 0x7, 0x924}, + { 0x10790, 2, 0x1f, 0x924}, + { 0x107a0, 2, 0x7, 0x924}, + { 0x107b0, 2, 0x7, 0x924}, + { 0x107c0, 2, 0x7, 0x924}, + { 0x107d0, 2, 0x7, 0x924}, + { 0x107e0, 2, 0x1f, 0x924}, + { 0x10880, 2, 0x1f, 0x924}, + { 0x10900, 2, 0x1f, 0x924}, + { 0x16000, 1, 0x6, 0x924}, + { 0x16004, 25, 0x1e, 0x924}, + { 0x16070, 8, 0x1e, 0x924}, + { 0x16090, 4, 0xe, 0x924}, + { 0x160a0, 6, 0x1e, 0x924}, + { 0x160c0, 7, 0x1e, 0x924}, + { 0x160dc, 2, 0x6, 0x924}, + { 0x160e4, 6, 0x1e, 0x924}, + { 0x160fc, 4, 0x1e, 0x1fff}, + { 0x1610c, 2, 0x6, 0x924}, + { 0x16114, 6, 0x1e, 0x924}, + { 0x16140, 48, 0x1e, 0x1fff}, + { 0x16204, 5, 0x1e, 0x924}, + { 0x18000, 1, 0x1e, 0x924}, + { 0x18008, 1, 0x1e, 0x924}, + { 0x18010, 35, 0x1c, 0x924}, + { 0x180a4, 2, 0x1c, 0x924}, + { 0x180c0, 9, 0x1c, 0x924}, + { 0x180e4, 1, 0xc, 0x924}, + { 0x180e8, 2, 0x1c, 0x924}, + { 0x180f0, 1, 0xc, 0x924}, + { 0x180f4, 79, 0x1c, 0x924}, + { 0x18230, 1, 0xc, 0x924}, + { 0x18234, 2, 0x1c, 0x924}, + { 0x1823c, 1, 
0xc, 0x924}, + { 0x18240, 13, 0x1c, 0x924}, + { 0x18274, 1, 0x4, 0x924}, + { 0x18278, 12, 0x1c, 0x924}, + { 0x182a8, 1, 0x1c, 0xfff}, + { 0x182ac, 3, 0x1c, 0x924}, + { 0x182b8, 1, 0x1c, 0xfff}, + { 0x182bc, 19, 0x1c, 0x924}, + { 0x18308, 1, 0x1c, 0xfff}, + { 0x1830c, 3, 0x1c, 0x924}, + { 0x18318, 1, 0x1c, 0xfff}, + { 0x1831c, 7, 0x1c, 0x924}, + { 0x18338, 1, 0x1c, 0xfff}, + { 0x1833c, 3, 0x1c, 0x924}, + { 0x18348, 1, 0x1c, 0xfff}, + { 0x1834c, 28, 0x1c, 0x924}, + { 0x183bc, 2, 0x1c, 0x1fff}, + { 0x183c8, 3, 0x1c, 0x1fff}, + { 0x183d8, 1, 0x1c, 0x1fff}, + { 0x18440, 48, 0x1c, 0x1fff}, + { 0x18500, 15, 0x1c, 0x924}, + { 0x18570, 1, 0x18, 0xfff}, + { 0x18574, 1, 0x18, 0x924}, + { 0x18578, 1, 0x18, 0xfff}, + { 0x1857c, 4, 0x18, 0x924}, + { 0x1858c, 1, 0x18, 0xfff}, + { 0x18590, 1, 0x18, 0x924}, + { 0x18594, 1, 0x18, 0xfff}, + { 0x18598, 32, 0x18, 0x924}, + { 0x18618, 5, 0x10, 0x924}, + { 0x1862c, 4, 0x10, 0xfff}, + { 0x1863c, 16, 0x10, 0x924}, + { 0x18680, 44, 0x10, 0x924}, + { 0x18748, 12, 0x10, 0x924}, + { 0x18788, 1, 0x10, 0x924}, + { 0x1879c, 6, 0x10, 0x924}, + { 0x187c4, 51, 0x10, 0x924}, + { 0x18a00, 48, 0x10, 0x924}, + { 0x20000, 24, 0x1f, 0x924}, + { 0x20060, 8, 0x1f, 0x9e4}, + { 0x20080, 94, 0x1f, 0x924}, + { 0x201f8, 1, 0x3, 0x924}, + { 0x201fc, 1, 0x1f, 0x924}, + { 0x20200, 1, 0x3, 0x924}, + { 0x20204, 1, 0x1f, 0x924}, + { 0x20208, 1, 0x3, 0x924}, + { 0x2020c, 4, 0x1f, 0x924}, + { 0x2021c, 11, 0x1f, 0xfff}, + { 0x20248, 24, 0x1f, 0x924}, + { 0x202b8, 2, 0x1f, 0x1fff}, + { 0x202c4, 1, 0x1f, 0x1fff}, + { 0x202c8, 1, 0x1c, 0x924}, + { 0x202d8, 4, 0x1c, 0x924}, + { 0x202f0, 1, 0x10, 0x924}, + { 0x20400, 1, 0x1f, 0x924}, + { 0x20404, 1, 0x1f, 0xfff}, + { 0x2040c, 2, 0x1f, 0xfff}, + { 0x20414, 2, 0x1f, 0x924}, + { 0x2041c, 2, 0x1f, 0xfff}, + { 0x20424, 2, 0x1f, 0x924}, + { 0x2042c, 18, 0x1e, 0x924}, + { 0x20480, 1, 0x1f, 0x924}, + { 0x20500, 1, 0x1f, 0x924}, + { 0x20600, 1, 0x1f, 0x924}, + { 0x28000, 1, 0x1f, 0x9e4}, + { 0x28004, 255, 0x1f, 0x180}, + { 0x28400, 1, 0x1f, 0x1c0}, + { 0x28404, 255, 0x1f, 0x180}, + { 0x28800, 1, 0x1f, 0x1c0}, + { 0x28804, 255, 0x1f, 0x180}, + { 0x28c00, 1, 0x1f, 0x1c0}, + { 0x28c04, 255, 0x1f, 0x180}, + { 0x29000, 1, 0x1f, 0x1c0}, + { 0x29004, 255, 0x1f, 0x180}, + { 0x29400, 1, 0x1f, 0x1c0}, + { 0x29404, 255, 0x1f, 0x180}, + { 0x29800, 1, 0x1f, 0x1c0}, + { 0x29804, 255, 0x1f, 0x180}, + { 0x29c00, 1, 0x1f, 0x1c0}, + { 0x29c04, 255, 0x1f, 0x180}, + { 0x2a000, 1, 0x1f, 0x1c0}, + { 0x2a004, 255, 0x1f, 0x180}, + { 0x2a400, 1, 0x1f, 0x1c0}, + { 0x2a404, 255, 0x1f, 0x180}, + { 0x2a800, 1, 0x1f, 0x1c0}, + { 0x2a804, 255, 0x1f, 0x180}, + { 0x2ac00, 1, 0x1f, 0x1c0}, + { 0x2ac04, 255, 0x1f, 0x180}, + { 0x2b000, 1, 0x1f, 0x1c0}, + { 0x2b004, 255, 0x1f, 0x180}, + { 0x2b400, 1, 0x1f, 0x1c0}, + { 0x2b404, 255, 0x1f, 0x180}, + { 0x2b800, 1, 0x1f, 0x1c0}, + { 0x2b804, 255, 0x1f, 0x180}, + { 0x2bc00, 1, 0x1f, 0x1c0}, + { 0x2bc04, 255, 0x1f, 0x180}, + { 0x2c000, 1, 0x1f, 0x1c0}, + { 0x2c004, 255, 0x1f, 0x180}, + { 0x2c400, 1, 0x1f, 0x1c0}, + { 0x2c404, 255, 0x1f, 0x180}, + { 0x2c800, 1, 0x1f, 0x1c0}, + { 0x2c804, 255, 0x1f, 0x180}, + { 0x2cc00, 1, 0x1f, 0x1c0}, + { 0x2cc04, 255, 0x1f, 0x180}, + { 0x2d000, 1, 0x1f, 0x1c0}, + { 0x2d004, 255, 0x1f, 0x180}, + { 0x2d400, 1, 0x1f, 0x1c0}, + { 0x2d404, 255, 0x1f, 0x180}, + { 0x2d800, 1, 0x1f, 0x1c0}, + { 0x2d804, 255, 0x1f, 0x180}, + { 0x2dc00, 1, 0x1f, 0x1c0}, + { 0x2dc04, 255, 0x1f, 0x180}, + { 0x2e000, 1, 0x1f, 0x1c0}, + { 0x2e004, 255, 0x1f, 0x180}, + { 0x2e400, 1, 0x1f, 0x1c0}, + { 0x2e404, 255, 0x1f, 0x180}, + { 0x2e800, 1, 
0x1f, 0x1c0}, + { 0x2e804, 255, 0x1f, 0x180}, + { 0x2ec00, 1, 0x1f, 0x1c0}, + { 0x2ec04, 255, 0x1f, 0x180}, + { 0x2f000, 1, 0x1f, 0x1c0}, + { 0x2f004, 255, 0x1f, 0x180}, + { 0x2f400, 1, 0x1f, 0x1c0}, + { 0x2f404, 255, 0x1f, 0x180}, + { 0x2f800, 1, 0x1f, 0x1c0}, + { 0x2f804, 255, 0x1f, 0x180}, + { 0x2fc00, 1, 0x1f, 0x1c0}, + { 0x2fc04, 255, 0x1f, 0x180}, + { 0x30000, 1, 0x1f, 0x9e4}, + { 0x30004, 255, 0x1f, 0x180}, + { 0x30400, 1, 0x1f, 0x1c0}, + { 0x30404, 255, 0x1f, 0x180}, + { 0x30800, 1, 0x1f, 0x1c0}, + { 0x30804, 255, 0x1f, 0x180}, + { 0x30c00, 1, 0x1f, 0x1c0}, + { 0x30c04, 255, 0x1f, 0x180}, + { 0x31000, 1, 0x1f, 0x1c0}, + { 0x31004, 255, 0x1f, 0x180}, + { 0x31400, 1, 0x1f, 0x1c0}, + { 0x31404, 255, 0x1f, 0x180}, + { 0x31800, 1, 0x1f, 0x1c0}, + { 0x31804, 255, 0x1f, 0x180}, + { 0x31c00, 1, 0x1f, 0x1c0}, + { 0x31c04, 255, 0x1f, 0x180}, + { 0x32000, 1, 0x1f, 0x1c0}, + { 0x32004, 255, 0x1f, 0x180}, + { 0x32400, 1, 0x1f, 0x1c0}, + { 0x32404, 255, 0x1f, 0x180}, + { 0x32800, 1, 0x1f, 0x1c0}, + { 0x32804, 255, 0x1f, 0x180}, + { 0x32c00, 1, 0x1f, 0x1c0}, + { 0x32c04, 255, 0x1f, 0x180}, + { 0x33000, 1, 0x1f, 0x1c0}, + { 0x33004, 255, 0x1f, 0x180}, + { 0x33400, 1, 0x1f, 0x1c0}, + { 0x33404, 255, 0x1f, 0x180}, + { 0x33800, 1, 0x1f, 0x1c0}, + { 0x33804, 255, 0x1f, 0x180}, + { 0x33c00, 1, 0x1f, 0x1c0}, + { 0x33c04, 255, 0x1f, 0x180}, + { 0x34000, 1, 0x1f, 0x1c0}, + { 0x34004, 255, 0x1f, 0x180}, + { 0x34400, 1, 0x1f, 0x1c0}, + { 0x34404, 255, 0x1f, 0x180}, + { 0x34800, 1, 0x1f, 0x1c0}, + { 0x34804, 255, 0x1f, 0x180}, + { 0x34c00, 1, 0x1f, 0x1c0}, + { 0x34c04, 255, 0x1f, 0x180}, + { 0x35000, 1, 0x1f, 0x1c0}, + { 0x35004, 255, 0x1f, 0x180}, + { 0x35400, 1, 0x1f, 0x1c0}, + { 0x35404, 255, 0x1f, 0x180}, + { 0x35800, 1, 0x1f, 0x1c0}, + { 0x35804, 255, 0x1f, 0x180}, + { 0x35c00, 1, 0x1f, 0x1c0}, + { 0x35c04, 255, 0x1f, 0x180}, + { 0x36000, 1, 0x1f, 0x1c0}, + { 0x36004, 255, 0x1f, 0x180}, + { 0x36400, 1, 0x1f, 0x1c0}, + { 0x36404, 255, 0x1f, 0x180}, + { 0x36800, 1, 0x1f, 0x1c0}, + { 0x36804, 255, 0x1f, 0x180}, + { 0x36c00, 1, 0x1f, 0x1c0}, + { 0x36c04, 255, 0x1f, 0x180}, + { 0x37000, 1, 0x1f, 0x1c0}, + { 0x37004, 255, 0x1f, 0x180}, + { 0x37400, 1, 0x1f, 0x1c0}, + { 0x37404, 255, 0x1f, 0x180}, + { 0x37800, 1, 0x1f, 0x1c0}, + { 0x37804, 255, 0x1f, 0x180}, + { 0x37c00, 1, 0x1f, 0x1c0}, + { 0x37c04, 255, 0x1f, 0x180}, + { 0x38000, 1, 0x1f, 0x1c0}, + { 0x38004, 255, 0x1f, 0x180}, + { 0x38400, 1, 0x1f, 0x1c0}, + { 0x38404, 255, 0x1f, 0x180}, + { 0x38800, 1, 0x1f, 0x1c0}, + { 0x38804, 255, 0x1f, 0x180}, + { 0x38c00, 1, 0x1f, 0x1c0}, + { 0x38c04, 255, 0x1f, 0x180}, + { 0x39000, 1, 0x1f, 0x1c0}, + { 0x39004, 255, 0x1f, 0x180}, + { 0x39400, 1, 0x1f, 0x1c0}, + { 0x39404, 255, 0x1f, 0x180}, + { 0x39800, 1, 0x1f, 0x1c0}, + { 0x39804, 255, 0x1f, 0x180}, + { 0x39c00, 1, 0x1f, 0x1c0}, + { 0x39c04, 255, 0x1f, 0x180}, + { 0x3a000, 1, 0x1f, 0x1c0}, + { 0x3a004, 255, 0x1f, 0x180}, + { 0x3a400, 1, 0x1f, 0x1c0}, + { 0x3a404, 255, 0x1f, 0x180}, + { 0x3a800, 1, 0x1f, 0x1c0}, + { 0x3a804, 255, 0x1f, 0x180}, + { 0x3ac00, 1, 0x1f, 0x1c0}, + { 0x3ac04, 255, 0x1f, 0x180}, + { 0x3b000, 1, 0x1f, 0x1c0}, + { 0x3b004, 255, 0x1f, 0x180}, + { 0x3b400, 1, 0x1f, 0x1c0}, + { 0x3b404, 255, 0x1f, 0x180}, + { 0x3b800, 1, 0x1f, 0x1c0}, + { 0x3b804, 255, 0x1f, 0x180}, + { 0x3bc00, 1, 0x1f, 0x1c0}, + { 0x3bc04, 255, 0x1f, 0x180}, + { 0x3c000, 1, 0x1f, 0x1c0}, + { 0x3c004, 255, 0x1f, 0x180}, + { 0x3c400, 1, 0x1f, 0x1c0}, + { 0x3c404, 255, 0x1f, 0x180}, + { 0x3c800, 1, 0x1f, 0x1c0}, + { 0x3c804, 255, 0x1f, 0x180}, + { 0x3cc00, 1, 0x1f, 0x1c0}, + { 
0x3cc04, 255, 0x1f, 0x180}, + { 0x3d000, 1, 0x1f, 0x1c0}, + { 0x3d004, 255, 0x1f, 0x180}, + { 0x3d400, 1, 0x1f, 0x1c0}, + { 0x3d404, 255, 0x1f, 0x180}, + { 0x3d800, 1, 0x1f, 0x1c0}, + { 0x3d804, 255, 0x1f, 0x180}, + { 0x3dc00, 1, 0x1f, 0x1c0}, + { 0x3dc04, 255, 0x1f, 0x180}, + { 0x3e000, 1, 0x1f, 0x1c0}, + { 0x3e004, 255, 0x1f, 0x180}, + { 0x3e400, 1, 0x1f, 0x1c0}, + { 0x3e404, 255, 0x1f, 0x180}, + { 0x3e800, 1, 0x1f, 0x1c0}, + { 0x3e804, 255, 0x1f, 0x180}, + { 0x3ec00, 1, 0x1f, 0x1c0}, + { 0x3ec04, 255, 0x1f, 0x180}, + { 0x3f000, 1, 0x1f, 0x1c0}, + { 0x3f004, 255, 0x1f, 0x180}, + { 0x3f400, 1, 0x1f, 0x1c0}, + { 0x3f404, 255, 0x1f, 0x180}, + { 0x3f800, 1, 0x1f, 0x1c0}, + { 0x3f804, 255, 0x1f, 0x180}, + { 0x3fc00, 1, 0x1f, 0x1c0}, + { 0x3fc04, 255, 0x1f, 0x180}, + { 0x40000, 85, 0x1f, 0x924}, + { 0x40154, 13, 0x1f, 0xfff}, + { 0x40198, 2, 0x1f, 0x1fff}, + { 0x401a4, 1, 0x1f, 0x1fff}, + { 0x401a8, 8, 0x1e, 0x924}, + { 0x401c8, 1, 0x2, 0x924}, + { 0x401cc, 2, 0x1e, 0x924}, + { 0x401d4, 2, 0x1c, 0x924}, + { 0x40200, 4, 0x1f, 0x924}, + { 0x40220, 6, 0x1c, 0x924}, + { 0x40238, 8, 0xc, 0x924}, + { 0x40258, 4, 0x1c, 0x924}, + { 0x40268, 2, 0x18, 0x924}, + { 0x40270, 17, 0x10, 0x924}, + { 0x40400, 43, 0x1f, 0x924}, + { 0x404bc, 2, 0x1f, 0x1fff}, + { 0x404c8, 1, 0x1f, 0x1fff}, + { 0x404cc, 3, 0x1e, 0x924}, + { 0x404e0, 1, 0x1c, 0x924}, + { 0x40500, 2, 0x1f, 0x924}, + { 0x40510, 2, 0x1f, 0x924}, + { 0x40520, 2, 0x1f, 0x924}, + { 0x40530, 2, 0x1f, 0x924}, + { 0x40540, 2, 0x1f, 0x924}, + { 0x40550, 10, 0x1c, 0x924}, + { 0x40610, 2, 0x1c, 0x924}, + { 0x42000, 164, 0x1f, 0x924}, + { 0x422b0, 2, 0x1f, 0x1fff}, + { 0x422bc, 1, 0x1f, 0x1fff}, + { 0x422c0, 4, 0x1c, 0x924}, + { 0x422d4, 5, 0x1e, 0x924}, + { 0x422e8, 1, 0x1c, 0x924}, + { 0x42400, 49, 0x1f, 0x924}, + { 0x424c8, 32, 0x1f, 0x924}, + { 0x42548, 1, 0x1f, 0xfff}, + { 0x4254c, 1, 0x1f, 0x924}, + { 0x42550, 1, 0x1f, 0xfff}, + { 0x42554, 1, 0x1f, 0x924}, + { 0x42558, 1, 0x1f, 0xfff}, + { 0x4255c, 1, 0x1f, 0x924}, + { 0x42568, 2, 0x1f, 0x924}, + { 0x42640, 5, 0x1c, 0x924}, + { 0x42800, 1, 0x1f, 0x924}, + { 0x50000, 1, 0x1f, 0x1fff}, + { 0x50004, 19, 0x1f, 0x924}, + { 0x50050, 8, 0x1f, 0x93c}, + { 0x50070, 60, 0x1f, 0x924}, + { 0x50160, 8, 0x1f, 0xfff}, + { 0x50180, 20, 0x1f, 0x924}, + { 0x501e0, 2, 0x1f, 0x1fff}, + { 0x501ec, 1, 0x1f, 0x1fff}, + { 0x501f0, 4, 0x1e, 0x924}, + { 0x50200, 1, 0x1f, 0x924}, + { 0x50204, 1, 0x1f, 0xfff}, + { 0x5020c, 2, 0x1f, 0xfff}, + { 0x50214, 2, 0x1f, 0x924}, + { 0x5021c, 1, 0x1f, 0xfff}, + { 0x50220, 2, 0x1f, 0x924}, + { 0x50228, 6, 0x1e, 0x924}, + { 0x50240, 1, 0x1f, 0x924}, + { 0x50280, 1, 0x1f, 0x924}, + { 0x50300, 1, 0x1c, 0x924}, + { 0x5030c, 1, 0x1c, 0x924}, + { 0x50318, 1, 0x1c, 0x934}, + { 0x5031c, 1, 0x1c, 0x924}, + { 0x50320, 2, 0x1c, 0x934}, + { 0x50330, 1, 0x10, 0x924}, + { 0x52000, 1, 0x1f, 0x924}, + { 0x54000, 1, 0x1f, 0x93c}, + { 0x54004, 255, 0x1f, 0x30}, + { 0x54400, 1, 0x1f, 0x38}, + { 0x54404, 255, 0x1f, 0x30}, + { 0x54800, 1, 0x1f, 0x38}, + { 0x54804, 255, 0x1f, 0x30}, + { 0x54c00, 1, 0x1f, 0x38}, + { 0x54c04, 255, 0x1f, 0x30}, + { 0x55000, 1, 0x1f, 0x38}, + { 0x55004, 255, 0x1f, 0x30}, + { 0x55400, 1, 0x1f, 0x38}, + { 0x55404, 255, 0x1f, 0x30}, + { 0x55800, 1, 0x1f, 0x38}, + { 0x55804, 255, 0x1f, 0x30}, + { 0x55c00, 1, 0x1f, 0x38}, + { 0x55c04, 255, 0x1f, 0x30}, + { 0x56000, 1, 0x1f, 0x38}, + { 0x56004, 255, 0x1f, 0x30}, + { 0x56400, 1, 0x1f, 0x38}, + { 0x56404, 255, 0x1f, 0x30}, + { 0x56800, 1, 0x1f, 0x38}, + { 0x56804, 255, 0x1f, 0x30}, + { 0x56c00, 1, 0x1f, 0x38}, + { 0x56c04, 255, 0x1f, 0x30}, + 
{ 0x57000, 1, 0x1f, 0x38}, + { 0x57004, 255, 0x1f, 0x30}, + { 0x58000, 1, 0x1f, 0x934}, + { 0x58004, 8191, 0x3, 0x30}, + { 0x60000, 26, 0x1f, 0x924}, + { 0x60068, 8, 0x3, 0x924}, + { 0x60088, 2, 0x1f, 0x924}, + { 0x60090, 1, 0x1f, 0xfff}, + { 0x60094, 9, 0x1f, 0x924}, + { 0x600b8, 9, 0x3, 0x924}, + { 0x600dc, 1, 0x1f, 0x924}, + { 0x600e0, 5, 0x3, 0x924}, + { 0x600f4, 1, 0x7, 0x924}, + { 0x600f8, 1, 0x3, 0x924}, + { 0x600fc, 8, 0x1f, 0x924}, + { 0x6012c, 2, 0x1f, 0x1fff}, + { 0x60138, 1, 0x1f, 0x1fff}, + { 0x6013c, 24, 0x2, 0x924}, + { 0x6019c, 2, 0x1c, 0x924}, + { 0x601ac, 18, 0x1c, 0x924}, + { 0x60200, 1, 0x1f, 0xb6d}, + { 0x60204, 2, 0x1f, 0x249}, + { 0x60210, 13, 0x1c, 0x924}, + { 0x60244, 16, 0x10, 0x924}, + { 0x61000, 1, 0x1f, 0xb6d}, + { 0x61004, 511, 0x1f, 0x249}, + { 0x61800, 512, 0x18, 0x249}, + { 0x70000, 8, 0x1f, 0xb6d}, + { 0x70020, 8184, 0x1f, 0x249}, + { 0x78000, 8192, 0x18, 0x249}, + { 0x85000, 3, 0x1f, 0x1000}, + { 0x8501c, 7, 0x1f, 0x1000}, + { 0x85048, 1, 0x1f, 0x1000}, + { 0x85200, 32, 0x1f, 0x1000}, + { 0xa0000, 16384, 0x3, 0x1000}, + { 0xb0000, 16384, 0x2, 0x1000}, + { 0xc1000, 7, 0x1f, 0x924}, + { 0xc102c, 2, 0x1f, 0x1fff}, + { 0xc1038, 1, 0x1f, 0x1fff}, + { 0xc103c, 2, 0x1c, 0x924}, + { 0xc1800, 2, 0x1f, 0x924}, + { 0xc2000, 164, 0x1f, 0x924}, + { 0xc22b0, 2, 0x1f, 0x1fff}, + { 0xc22bc, 1, 0x1f, 0x1fff}, + { 0xc22c0, 5, 0x1c, 0x924}, + { 0xc22d8, 4, 0x1c, 0x924}, + { 0xc2400, 49, 0x1f, 0x924}, + { 0xc24c8, 32, 0x1f, 0x924}, + { 0xc2548, 1, 0x1f, 0xfff}, + { 0xc254c, 1, 0x1f, 0x924}, + { 0xc2550, 1, 0x1f, 0xfff}, + { 0xc2554, 1, 0x1f, 0x924}, + { 0xc2558, 1, 0x1f, 0xfff}, + { 0xc255c, 1, 0x1f, 0x924}, + { 0xc2568, 2, 0x1f, 0x924}, + { 0xc2600, 1, 0x1f, 0x924}, + { 0xc4000, 165, 0x1f, 0x924}, + { 0xc42b4, 2, 0x1f, 0x1fff}, + { 0xc42c0, 1, 0x1f, 0x1fff}, + { 0xc42d8, 2, 0x1c, 0x924}, + { 0xc42e0, 7, 0x1e, 0x924}, + { 0xc42fc, 1, 0x1c, 0x924}, + { 0xc4400, 51, 0x1f, 0x924}, + { 0xc44d0, 32, 0x1f, 0x924}, + { 0xc4550, 1, 0x1f, 0xfff}, + { 0xc4554, 1, 0x1f, 0x924}, + { 0xc4558, 1, 0x1f, 0xfff}, + { 0xc455c, 1, 0x1f, 0x924}, + { 0xc4560, 1, 0x1f, 0xfff}, + { 0xc4564, 1, 0x1f, 0x924}, + { 0xc4570, 2, 0x1f, 0x924}, + { 0xc4578, 5, 0x1c, 0x924}, + { 0xc4600, 1, 0x1f, 0x924}, + { 0xd0000, 19, 0x1f, 0x924}, + { 0xd004c, 8, 0x1f, 0x1927}, + { 0xd006c, 64, 0x1f, 0x924}, + { 0xd016c, 8, 0x1f, 0xfff}, + { 0xd018c, 19, 0x1f, 0x924}, + { 0xd01e8, 2, 0x1f, 0x1fff}, + { 0xd01f4, 1, 0x1f, 0x1fff}, + { 0xd01fc, 1, 0x1c, 0x924}, + { 0xd0200, 1, 0x1f, 0x924}, + { 0xd0204, 1, 0x1f, 0xfff}, + { 0xd020c, 3, 0x1f, 0xfff}, + { 0xd0218, 4, 0x1f, 0x924}, + { 0xd0228, 18, 0x1e, 0x924}, + { 0xd0280, 1, 0x1f, 0x924}, + { 0xd0300, 1, 0x1f, 0x924}, + { 0xd0400, 1, 0x1f, 0x924}, + { 0xd0818, 1, 0x10, 0x924}, + { 0xd4000, 1, 0x1f, 0x1927}, + { 0xd4004, 255, 0x1f, 0x6}, + { 0xd4400, 1, 0x1f, 0x1007}, + { 0xd4404, 255, 0x1f, 0x6}, + { 0xd4800, 1, 0x1f, 0x1007}, + { 0xd4804, 255, 0x1f, 0x6}, + { 0xd4c00, 1, 0x1f, 0x1007}, + { 0xd4c04, 255, 0x1f, 0x6}, + { 0xd5000, 1, 0x1f, 0x1007}, + { 0xd5004, 255, 0x1f, 0x6}, + { 0xd5400, 1, 0x1f, 0x1007}, + { 0xd5404, 255, 0x1f, 0x6}, + { 0xd5800, 1, 0x1f, 0x1007}, + { 0xd5804, 255, 0x1f, 0x6}, + { 0xd5c00, 1, 0x1f, 0x1007}, + { 0xd5c04, 255, 0x1f, 0x6}, + { 0xd6000, 1, 0x1f, 0x1007}, + { 0xd6004, 255, 0x1f, 0x6}, + { 0xd6400, 1, 0x1f, 0x1007}, + { 0xd6404, 255, 0x1f, 0x6}, + { 0xd8000, 1, 0x1f, 0x1927}, + { 0xd8004, 255, 0x1f, 0x6}, + { 0xd8400, 1, 0x1f, 0x1007}, + { 0xd8404, 255, 0x1f, 0x6}, + { 0xd8800, 1, 0x1f, 0x1007}, + { 0xd8804, 255, 0x1f, 0x6}, + { 0xd8c00, 1, 
0x1f, 0x1007}, + { 0xd8c04, 255, 0x1f, 0x6}, + { 0xd9000, 1, 0x1f, 0x1007}, + { 0xd9004, 255, 0x1f, 0x6}, + { 0xd9400, 1, 0x1f, 0x1007}, + { 0xd9404, 255, 0x1f, 0x6}, + { 0xd9800, 1, 0x1f, 0x1007}, + { 0xd9804, 255, 0x1f, 0x6}, + { 0xd9c00, 1, 0x1f, 0x1007}, + { 0xd9c04, 255, 0x1f, 0x6}, + { 0xda000, 1, 0x1f, 0x1007}, + { 0xda004, 255, 0x1f, 0x6}, + { 0xda400, 1, 0x1f, 0x1007}, + { 0xda404, 255, 0x1f, 0x6}, + { 0xda800, 1, 0x1f, 0x1007}, + { 0xda804, 255, 0x1f, 0x6}, + { 0xdac00, 1, 0x1f, 0x1007}, + { 0xdac04, 255, 0x1f, 0x6}, + { 0xdb000, 1, 0x1f, 0x1007}, + { 0xdb004, 255, 0x1f, 0x6}, + { 0xdb400, 1, 0x1f, 0x1007}, + { 0xdb404, 255, 0x1f, 0x6}, + { 0xdb800, 1, 0x1f, 0x1007}, + { 0xdb804, 255, 0x1f, 0x6}, + { 0xdbc00, 1, 0x1f, 0x1007}, + { 0xdbc04, 255, 0x1f, 0x6}, + { 0xdc000, 1, 0x1f, 0x1007}, + { 0xdc004, 255, 0x1f, 0x6}, + { 0xdc400, 1, 0x1f, 0x1007}, + { 0xdc404, 255, 0x1f, 0x6}, + { 0xdc800, 1, 0x1f, 0x1007}, + { 0xdc804, 255, 0x1f, 0x6}, + { 0xdcc00, 1, 0x1f, 0x1007}, + { 0xdcc04, 255, 0x1f, 0x6}, + { 0xdd000, 1, 0x1f, 0x1007}, + { 0xdd004, 255, 0x1f, 0x6}, + { 0xdd400, 1, 0x1f, 0x1007}, + { 0xdd404, 255, 0x1f, 0x6}, + { 0xdd800, 1, 0x1f, 0x1007}, + { 0xdd804, 255, 0x1f, 0x6}, + { 0xddc00, 1, 0x1f, 0x1007}, + { 0xddc04, 255, 0x1f, 0x6}, + { 0xde000, 1, 0x1f, 0x1007}, + { 0xde004, 255, 0x1f, 0x6}, + { 0xde400, 1, 0x1f, 0x1007}, + { 0xde404, 255, 0x1f, 0x6}, + { 0xde800, 1, 0x1f, 0x1007}, + { 0xde804, 255, 0x1f, 0x6}, + { 0xdec00, 1, 0x1f, 0x1007}, + { 0xdec04, 255, 0x1f, 0x6}, + { 0xdf000, 1, 0x1f, 0x1007}, + { 0xdf004, 255, 0x1f, 0x6}, + { 0xdf400, 1, 0x1f, 0x1007}, + { 0xdf404, 255, 0x1f, 0x6}, + { 0xdf800, 1, 0x1f, 0x1007}, + { 0xdf804, 255, 0x1f, 0x6}, + { 0xdfc00, 1, 0x1f, 0x1007}, + { 0xdfc04, 255, 0x1f, 0x6}, + { 0xe0000, 21, 0x1f, 0x924}, + { 0xe0054, 8, 0x1f, 0xf24}, + { 0xe0074, 49, 0x1f, 0x924}, + { 0xe0138, 1, 0x3, 0x924}, + { 0xe013c, 6, 0x1f, 0x924}, + { 0xe0154, 8, 0x1f, 0xfff}, + { 0xe0174, 21, 0x1f, 0x924}, + { 0xe01d8, 2, 0x1f, 0x1fff}, + { 0xe01e4, 1, 0x1f, 0x1fff}, + { 0xe01f4, 1, 0x4, 0x924}, + { 0xe01f8, 1, 0x1c, 0x924}, + { 0xe0200, 1, 0x1f, 0x924}, + { 0xe0204, 1, 0x1f, 0xfff}, + { 0xe020c, 2, 0x1f, 0xfff}, + { 0xe0214, 2, 0x1f, 0x924}, + { 0xe021c, 2, 0x1f, 0xfff}, + { 0xe0224, 2, 0x1f, 0x924}, + { 0xe022c, 18, 0x1e, 0x924}, + { 0xe0280, 1, 0x1f, 0x924}, + { 0xe0300, 1, 0x1f, 0x924}, + { 0xe0400, 1, 0x10, 0x924}, + { 0xe1000, 1, 0x1f, 0x924}, + { 0xe2000, 1, 0x1f, 0xf24}, + { 0xe2004, 255, 0x1f, 0xc00}, + { 0xe2400, 1, 0x1f, 0xe00}, + { 0xe2404, 255, 0x1f, 0xc00}, + { 0xe2800, 1, 0x1f, 0xe00}, + { 0xe2804, 255, 0x1f, 0xc00}, + { 0xe2c00, 1, 0x1f, 0xe00}, + { 0xe2c04, 255, 0x1f, 0xc00}, + { 0xe3000, 1, 0x1f, 0xe00}, + { 0xe3004, 255, 0x1f, 0xc00}, + { 0xe3400, 1, 0x1f, 0xe00}, + { 0xe3404, 255, 0x1f, 0xc00}, + { 0xe3800, 1, 0x1f, 0xe00}, + { 0xe3804, 255, 0x1f, 0xc00}, + { 0xe3c00, 1, 0x1f, 0xe00}, + { 0xe3c04, 255, 0x1f, 0xc00}, + { 0xf0000, 1, 0x1f, 0xf24}, + { 0xf0004, 255, 0x1f, 0xc00}, + { 0xf0400, 1, 0x1f, 0xe00}, + { 0xf0404, 255, 0x1f, 0xc00}, + { 0xf0800, 1, 0x1f, 0xe00}, + { 0xf0804, 255, 0x1f, 0xc00}, + { 0xf0c00, 1, 0x1f, 0xe00}, + { 0xf0c04, 255, 0x1f, 0xc00}, + { 0xf1000, 1, 0x1f, 0xe00}, + { 0xf1004, 255, 0x1f, 0xc00}, + { 0xf1400, 1, 0x1f, 0xe00}, + { 0xf1404, 255, 0x1f, 0xc00}, + { 0xf1800, 1, 0x1f, 0xe00}, + { 0xf1804, 255, 0x1f, 0xc00}, + { 0xf1c00, 1, 0x1f, 0xe00}, + { 0xf1c04, 255, 0x1f, 0xc00}, + { 0xf2000, 1, 0x1f, 0xe00}, + { 0xf2004, 255, 0x1f, 0xc00}, + { 0xf2400, 1, 0x1f, 0xe00}, + { 0xf2404, 255, 0x1f, 0xc00}, + { 0xf2800, 1, 0x1f, 
0xe00}, + { 0xf2804, 255, 0x1f, 0xc00}, + { 0xf2c00, 1, 0x1f, 0xe00}, + { 0xf2c04, 255, 0x1f, 0xc00}, + { 0xf3000, 1, 0x1f, 0xe00}, + { 0xf3004, 255, 0x1f, 0xc00}, + { 0xf3400, 1, 0x1f, 0xe00}, + { 0xf3404, 255, 0x1f, 0xc00}, + { 0xf3800, 1, 0x1f, 0xe00}, + { 0xf3804, 255, 0x1f, 0xc00}, + { 0xf3c00, 1, 0x1f, 0xe00}, + { 0xf3c04, 255, 0x1f, 0xc00}, + { 0xf4000, 1, 0x1f, 0xe00}, + { 0xf4004, 255, 0x1f, 0xc00}, + { 0xf4400, 1, 0x1f, 0xe00}, + { 0xf4404, 255, 0x1f, 0xc00}, + { 0xf4800, 1, 0x1f, 0xe00}, + { 0xf4804, 255, 0x1f, 0xc00}, + { 0xf4c00, 1, 0x1f, 0xe00}, + { 0xf4c04, 255, 0x1f, 0xc00}, + { 0xf5000, 1, 0x1f, 0xe00}, + { 0xf5004, 255, 0x1f, 0xc00}, + { 0xf5400, 1, 0x1f, 0xe00}, + { 0xf5404, 255, 0x1f, 0xc00}, + { 0xf5800, 1, 0x1f, 0xe00}, + { 0xf5804, 255, 0x1f, 0xc00}, + { 0xf5c00, 1, 0x1f, 0xe00}, + { 0xf5c04, 255, 0x1f, 0xc00}, + { 0xf6000, 1, 0x1f, 0xe00}, + { 0xf6004, 255, 0x1f, 0xc00}, + { 0xf6400, 1, 0x1f, 0xe00}, + { 0xf6404, 255, 0x1f, 0xc00}, + { 0xf6800, 1, 0x1f, 0xe00}, + { 0xf6804, 255, 0x1f, 0xc00}, + { 0xf6c00, 1, 0x1f, 0xe00}, + { 0xf6c04, 255, 0x1f, 0xc00}, + { 0xf7000, 1, 0x1f, 0xe00}, + { 0xf7004, 255, 0x1f, 0xc00}, + { 0xf7400, 1, 0x1f, 0xe00}, + { 0xf7404, 255, 0x1f, 0xc00}, + { 0xf7800, 1, 0x1f, 0xe00}, + { 0xf7804, 255, 0x1f, 0xc00}, + { 0xf7c00, 1, 0x1f, 0xe00}, + { 0xf7c04, 255, 0x1f, 0xc00}, + { 0xf8000, 1, 0x1f, 0xe00}, + { 0xf8004, 255, 0x1f, 0xc00}, + { 0xf8400, 1, 0x1f, 0xe00}, + { 0xf8404, 255, 0x1f, 0xc00}, + { 0xf8800, 1, 0x1f, 0xe00}, + { 0xf8804, 255, 0x1f, 0xc00}, + { 0xf8c00, 1, 0x1f, 0xe00}, + { 0xf8c04, 255, 0x1f, 0xc00}, + { 0xf9000, 1, 0x1f, 0xe00}, + { 0xf9004, 255, 0x1f, 0xc00}, + { 0xf9400, 1, 0x1f, 0xe00}, + { 0xf9404, 255, 0x1f, 0xc00}, + { 0xf9800, 1, 0x1f, 0xe00}, + { 0xf9804, 255, 0x1f, 0xc00}, + { 0xf9c00, 1, 0x1f, 0xe00}, + { 0xf9c04, 255, 0x1f, 0xc00}, + { 0xfa000, 1, 0x1f, 0xe00}, + { 0xfa004, 255, 0x1f, 0xc00}, + { 0xfa400, 1, 0x1f, 0xe00}, + { 0xfa404, 255, 0x1f, 0xc00}, + { 0xfa800, 1, 0x1f, 0xe00}, + { 0xfa804, 255, 0x1f, 0xc00}, + { 0xfac00, 1, 0x1f, 0xe00}, + { 0xfac04, 255, 0x1f, 0xc00}, + { 0xfb000, 1, 0x1f, 0xe00}, + { 0xfb004, 255, 0x1f, 0xc00}, + { 0xfb400, 1, 0x1f, 0xe00}, + { 0xfb404, 255, 0x1f, 0xc00}, + { 0xfb800, 1, 0x1f, 0xe00}, + { 0xfb804, 255, 0x1f, 0xc00}, + { 0xfbc00, 1, 0x1f, 0xe00}, + { 0xfbc04, 255, 0x1f, 0xc00}, + { 0xfc000, 1, 0x1f, 0xe00}, + { 0xfc004, 255, 0x1f, 0xc00}, + { 0xfc400, 1, 0x1f, 0xe00}, + { 0xfc404, 255, 0x1f, 0xc00}, + { 0xfc800, 1, 0x1f, 0xe00}, + { 0xfc804, 255, 0x1f, 0xc00}, + { 0xfcc00, 1, 0x1f, 0xe00}, + { 0xfcc04, 255, 0x1f, 0xc00}, + { 0xfd000, 1, 0x1f, 0xe00}, + { 0xfd004, 255, 0x1f, 0xc00}, + { 0xfd400, 1, 0x1f, 0xe00}, + { 0xfd404, 255, 0x1f, 0xc00}, + { 0xfd800, 1, 0x1f, 0xe00}, + { 0xfd804, 255, 0x1f, 0xc00}, + { 0xfdc00, 1, 0x1f, 0xe00}, + { 0xfdc04, 255, 0x1f, 0xc00}, + { 0xfe000, 1, 0x1f, 0xe00}, + { 0xfe004, 255, 0x1f, 0xc00}, + { 0xfe400, 1, 0x1f, 0xe00}, + { 0xfe404, 255, 0x1f, 0xc00}, + { 0xfe800, 1, 0x1f, 0xe00}, + { 0xfe804, 255, 0x1f, 0xc00}, + { 0xfec00, 1, 0x1f, 0xe00}, + { 0xfec04, 255, 0x1f, 0xc00}, + { 0xff000, 1, 0x1f, 0xe00}, + { 0xff004, 255, 0x1f, 0xc00}, + { 0xff400, 1, 0x1f, 0xe00}, + { 0xff404, 255, 0x1f, 0xc00}, + { 0xff800, 1, 0x1f, 0xe00}, + { 0xff804, 255, 0x1f, 0xc00}, + { 0xffc00, 1, 0x1f, 0xe00}, + { 0xffc04, 255, 0x1f, 0xc00}, + { 0x101000, 5, 0x1f, 0x924}, + { 0x101014, 1, 0x1f, 0xfff}, + { 0x101018, 6, 0x1f, 0x924}, + { 0x101040, 2, 0x1f, 0x1fff}, + { 0x10104c, 1, 0x1f, 0x1fff}, + { 0x101050, 1, 0x1e, 0x924}, + { 0x101054, 3, 0x1c, 0x924}, + { 
0x101100, 1, 0x1f, 0x924}, + { 0x101800, 8, 0x1f, 0x924}, + { 0x102000, 18, 0x1f, 0x924}, + { 0x102058, 2, 0x1f, 0x1fff}, + { 0x102064, 1, 0x1f, 0x1fff}, + { 0x102068, 6, 0x1c, 0x924}, + { 0x102080, 16, 0x1f, 0xfff}, + { 0x1020c0, 1, 0x1f, 0x924}, + { 0x1020c8, 8, 0x2, 0x924}, + { 0x1020e8, 9, 0x1c, 0x924}, + { 0x102400, 1, 0x1f, 0x924}, + { 0x103000, 1, 0x1f, 0x924}, + { 0x103004, 2, 0x1f, 0xfff}, + { 0x10300c, 23, 0x1f, 0x924}, + { 0x103088, 2, 0x1f, 0x1fff}, + { 0x103094, 1, 0x1f, 0x1fff}, + { 0x103098, 1, 0x1e, 0x924}, + { 0x10309c, 2, 0x1e, 0xfff}, + { 0x1030a4, 2, 0x1e, 0x924}, + { 0x1030ac, 2, 0x1c, 0x924}, + { 0x1030b4, 1, 0x4, 0x924}, + { 0x1030b8, 2, 0x1c, 0xfff}, + { 0x1030c0, 3, 0x1c, 0x924}, + { 0x1030cc, 1, 0x1c, 0xfff}, + { 0x1030d0, 1, 0x1c, 0x924}, + { 0x1030d8, 2, 0x1c, 0x924}, + { 0x1030e0, 1, 0x1c, 0xfff}, + { 0x1030e4, 5, 0x1c, 0x924}, + { 0x103400, 136, 0x1c, 0x1fff}, + { 0x103800, 8, 0x1f, 0x924}, + { 0x104000, 1, 0x1f, 0x924}, + { 0x104004, 1, 0x1f, 0xfff}, + { 0x104008, 4, 0x1f, 0x924}, + { 0x104018, 1, 0x1f, 0xfff}, + { 0x10401c, 1, 0x1f, 0x924}, + { 0x104020, 1, 0x1f, 0xfff}, + { 0x104024, 6, 0x1f, 0x924}, + { 0x10403c, 1, 0x1f, 0xfff}, + { 0x104040, 47, 0x1f, 0x924}, + { 0x10410c, 2, 0x1f, 0x1fff}, + { 0x104118, 1, 0x1f, 0x1fff}, + { 0x10411c, 16, 0x1c, 0x924}, + { 0x104200, 17, 0x1f, 0x924}, + { 0x104400, 1, 0x1f, 0x1fff}, + { 0x104404, 63, 0x1f, 0xfff}, + { 0x104500, 192, 0x1f, 0xdb6}, + { 0x104800, 1, 0x1f, 0x1fff}, + { 0x104804, 63, 0x1f, 0xfff}, + { 0x104900, 192, 0x1f, 0xdb6}, + { 0x105000, 4, 0x1f, 0x1fff}, + { 0x105010, 252, 0x1f, 0xfff}, + { 0x105400, 768, 0x1f, 0xdb6}, + { 0x107000, 7, 0x1c, 0x924}, + { 0x10701c, 1, 0x18, 0x924}, + { 0x108000, 33, 0x3, 0x924}, + { 0x1080ac, 5, 0x2, 0x924}, + { 0x108100, 5, 0x3, 0x924}, + { 0x108120, 5, 0x3, 0x924}, + { 0x108200, 74, 0x3, 0x924}, + { 0x108400, 74, 0x3, 0x924}, + { 0x108800, 152, 0x3, 0x924}, + { 0x110000, 111, 0x1c, 0x924}, + { 0x1101cc, 2, 0x1c, 0x1fff}, + { 0x1101d8, 1, 0x1c, 0x1fff}, + { 0x1101dc, 1, 0x18, 0x924}, + { 0x110200, 4, 0x1c, 0x924}, + { 0x120000, 92, 0x1f, 0x924}, + { 0x120170, 2, 0x3, 0x924}, + { 0x120178, 14, 0x1f, 0x924}, + { 0x1201b0, 2, 0x1f, 0xfff}, + { 0x1201b8, 93, 0x1f, 0x924}, + { 0x12032c, 1, 0x1f, 0xfff}, + { 0x120330, 15, 0x1f, 0x924}, + { 0x12036c, 3, 0x1f, 0xfff}, + { 0x120378, 36, 0x1f, 0x924}, + { 0x120408, 2, 0x1f, 0xfff}, + { 0x120410, 1, 0x1f, 0x924}, + { 0x120414, 15, 0x1f, 0xfff}, + { 0x120450, 10, 0x1f, 0x924}, + { 0x120478, 2, 0x1f, 0xfff}, + { 0x120480, 43, 0x1f, 0x924}, + { 0x12052c, 1, 0x1f, 0xfff}, + { 0x120530, 5, 0x1f, 0x924}, + { 0x120544, 4, 0x3, 0x924}, + { 0x120554, 4, 0x1f, 0x924}, + { 0x120564, 2, 0x1f, 0xfff}, + { 0x12057c, 2, 0x1f, 0x1fff}, + { 0x120588, 3, 0x1f, 0x1fff}, + { 0x120598, 1, 0x1f, 0x1fff}, + { 0x12059c, 22, 0x1e, 0x924}, + { 0x1205f4, 1, 0x6, 0x924}, + { 0x1205f8, 4, 0x1c, 0x924}, + { 0x120618, 1, 0x1c, 0x924}, + { 0x12061c, 31, 0x1e, 0x924}, + { 0x120698, 3, 0x1c, 0x924}, + { 0x1206a4, 1, 0x4, 0x924}, + { 0x1206a8, 1, 0x1c, 0x924}, + { 0x1206b0, 38, 0x1c, 0x924}, + { 0x120748, 1, 0x1c, 0xfff}, + { 0x12074c, 11, 0x1c, 0x924}, + { 0x120778, 2, 0x1c, 0xfff}, + { 0x120780, 23, 0x1c, 0x924}, + { 0x1207dc, 1, 0x4, 0x924}, + { 0x1207fc, 1, 0x1c, 0x924}, + { 0x12080c, 2, 0x1f, 0xfff}, + { 0x120814, 1, 0x1f, 0x924}, + { 0x120818, 1, 0x1f, 0xfff}, + { 0x12081c, 1, 0x1f, 0x924}, + { 0x120820, 1, 0x1f, 0xfff}, + { 0x120824, 1, 0x1f, 0x924}, + { 0x120828, 1, 0x1f, 0xfff}, + { 0x12082c, 1, 0x1f, 0x924}, + { 0x120830, 1, 0x1f, 0xfff}, + { 0x120834, 1, 
0x1f, 0x924}, + { 0x120838, 1, 0x1f, 0xfff}, + { 0x12083c, 1, 0x1f, 0x924}, + { 0x120840, 1, 0x1f, 0xfff}, + { 0x120844, 1, 0x1f, 0x924}, + { 0x120848, 1, 0x1f, 0xfff}, + { 0x12084c, 1, 0x1f, 0x924}, + { 0x120850, 1, 0x1f, 0xfff}, + { 0x120854, 1, 0x1f, 0x924}, + { 0x120858, 1, 0x1f, 0xfff}, + { 0x12085c, 1, 0x1f, 0x924}, + { 0x120860, 1, 0x1f, 0xfff}, + { 0x120864, 1, 0x1f, 0x924}, + { 0x120868, 1, 0x1f, 0xfff}, + { 0x12086c, 1, 0x1f, 0x924}, + { 0x120870, 1, 0x1f, 0xfff}, + { 0x120874, 1, 0x1f, 0x924}, + { 0x120878, 1, 0x1f, 0xfff}, + { 0x12087c, 1, 0x1f, 0x924}, + { 0x120880, 1, 0x1f, 0xfff}, + { 0x120884, 1, 0x1f, 0x924}, + { 0x120888, 1, 0x1f, 0xfff}, + { 0x12088c, 1, 0x1f, 0x924}, + { 0x120890, 1, 0x1f, 0xfff}, + { 0x120894, 1, 0x1f, 0x924}, + { 0x120898, 1, 0x1f, 0xfff}, + { 0x12089c, 1, 0x1f, 0x924}, + { 0x1208a0, 1, 0x1f, 0xfff}, + { 0x1208a4, 1, 0x1f, 0x924}, + { 0x1208a8, 1, 0x1f, 0xfff}, + { 0x1208ac, 1, 0x1f, 0x924}, + { 0x1208b0, 1, 0x1f, 0xfff}, + { 0x1208b4, 1, 0x1f, 0x924}, + { 0x1208b8, 1, 0x1f, 0xfff}, + { 0x1208bc, 1, 0x1f, 0x924}, + { 0x1208c0, 1, 0x1f, 0xfff}, + { 0x1208c4, 1, 0x1f, 0x924}, + { 0x1208c8, 1, 0x1f, 0xfff}, + { 0x1208cc, 1, 0x1f, 0x924}, + { 0x1208d0, 1, 0x1f, 0xfff}, + { 0x1208d4, 1, 0x1f, 0x924}, + { 0x1208d8, 1, 0x1f, 0xfff}, + { 0x1208dc, 1, 0x1f, 0x924}, + { 0x1208e0, 1, 0x1f, 0xfff}, + { 0x1208e4, 1, 0x1f, 0x924}, + { 0x1208e8, 1, 0x1f, 0xfff}, + { 0x1208ec, 1, 0x1f, 0x924}, + { 0x1208f0, 1, 0x1f, 0xfff}, + { 0x1208f4, 1, 0x1f, 0x924}, + { 0x1208f8, 1, 0x1f, 0xfff}, + { 0x1208fc, 1, 0x1f, 0x924}, + { 0x120900, 1, 0x1f, 0xfff}, + { 0x120904, 1, 0x1f, 0x924}, + { 0x120908, 1, 0x1f, 0xfff}, + { 0x12090c, 1, 0x1f, 0x924}, + { 0x120910, 7, 0x1c, 0x924}, + { 0x120930, 9, 0x1c, 0x924}, + { 0x12095c, 37, 0x18, 0x924}, + { 0x120a00, 2, 0x7, 0x924}, + { 0x120b00, 1, 0x18, 0x924}, + { 0x122000, 2, 0x1f, 0x924}, + { 0x122008, 2046, 0x1, 0x924}, + { 0x128000, 6144, 0x1e, 0x924}, + { 0x130000, 1, 0x1c, 0x1fff}, + { 0x130004, 11, 0x1c, 0x924}, + { 0x130030, 1, 0x1c, 0xfff}, + { 0x130034, 6, 0x1c, 0x924}, + { 0x13004c, 3, 0x1c, 0xfff}, + { 0x130058, 3, 0x1c, 0x924}, + { 0x130064, 2, 0x1c, 0xfff}, + { 0x13006c, 8, 0x1c, 0x924}, + { 0x13009c, 2, 0x1c, 0x1fff}, + { 0x1300a8, 1, 0x1c, 0x1fff}, + { 0x130100, 12, 0x1c, 0x924}, + { 0x130130, 1, 0x1c, 0xfff}, + { 0x130134, 14, 0x1c, 0x924}, + { 0x13016c, 1, 0x1c, 0xfff}, + { 0x130170, 1, 0x1c, 0x924}, + { 0x130180, 1, 0x1c, 0x924}, + { 0x130200, 1, 0x1c, 0x924}, + { 0x130280, 1, 0x1c, 0x924}, + { 0x130300, 1, 0x1c, 0xfff}, + { 0x130304, 4, 0x1c, 0x924}, + { 0x130380, 1, 0x1c, 0x924}, + { 0x130400, 1, 0x1c, 0x924}, + { 0x130480, 1, 0x1c, 0xfff}, + { 0x130484, 4, 0x1c, 0x924}, + { 0x130800, 72, 0x1c, 0x924}, + { 0x131000, 136, 0x1c, 0x924}, + { 0x132000, 148, 0x1c, 0x924}, + { 0x134000, 544, 0x1c, 0x924}, + { 0x140000, 1, 0x1f, 0x924}, + { 0x140004, 9, 0xf, 0x924}, + { 0x140028, 8, 0x1f, 0x924}, + { 0x140048, 5, 0xf, 0x924}, + { 0x14005c, 2, 0xf, 0xfff}, + { 0x140064, 3, 0xf, 0x924}, + { 0x140070, 1, 0x1f, 0x924}, + { 0x140074, 10, 0xf, 0x924}, + { 0x14009c, 1, 0x1f, 0x924}, + { 0x1400a0, 5, 0xf, 0x924}, + { 0x1400b4, 7, 0x1f, 0x924}, + { 0x1400d0, 2, 0xf, 0xfff}, + { 0x1400d8, 2, 0xf, 0x924}, + { 0x1400e0, 1, 0xf, 0xfff}, + { 0x1400e4, 5, 0xf, 0x924}, + { 0x1400f8, 2, 0x1f, 0x924}, + { 0x140100, 5, 0x3, 0x924}, + { 0x140114, 5, 0xf, 0x924}, + { 0x140128, 7, 0x1f, 0x924}, + { 0x140144, 9, 0xf, 0x924}, + { 0x140168, 8, 0x1f, 0x924}, + { 0x140188, 3, 0xf, 0x924}, + { 0x140194, 13, 0x1f, 0x924}, + { 0x1401d8, 2, 0x1f, 0x1fff}, 
+ { 0x1401e4, 1, 0x1f, 0x1fff}, + { 0x140200, 6, 0xf, 0xfff}, + { 0x1402e0, 2, 0xc, 0x924}, + { 0x1402e8, 2, 0x1c, 0x924}, + { 0x1402f0, 9, 0xc, 0x924}, + { 0x140314, 9, 0x10, 0x924}, + { 0x140338, 7, 0x10, 0xfff}, + { 0x140354, 7, 0x10, 0x924}, + { 0x140370, 7, 0x10, 0xfff}, + { 0x14038c, 14, 0x10, 0x924}, + { 0x1404b0, 14, 0x10, 0x924}, + { 0x15c000, 2, 0x1e, 0x924}, + { 0x15c008, 5, 0x2, 0x924}, + { 0x15c020, 8, 0x1c, 0x924}, + { 0x15c040, 1, 0xc, 0x924}, + { 0x15c044, 2, 0x1c, 0x924}, + { 0x15c04c, 8, 0xc, 0x924}, + { 0x15c06c, 8, 0x1c, 0x924}, + { 0x15c090, 13, 0x1c, 0x924}, + { 0x15c0c8, 24, 0x1c, 0x924}, + { 0x15c128, 2, 0xc, 0x924}, + { 0x15c130, 1, 0x1c, 0x924}, + { 0x15c138, 6, 0x1c, 0x924}, + { 0x15c150, 2, 0x18, 0x924}, + { 0x15c158, 2, 0x8, 0x924}, + { 0x15c160, 23, 0x10, 0x924}, + { 0x15c1bc, 6, 0x10, 0xfff}, + { 0x15c1d4, 23, 0x10, 0x924}, + { 0x15c230, 7, 0x10, 0xfff}, + { 0x15c24c, 90, 0x10, 0x924}, + { 0x160004, 6, 0x18, 0x924}, + { 0x16003c, 1, 0x10, 0x924}, + { 0x160040, 6, 0x18, 0x924}, + { 0x16005c, 6, 0x18, 0x924}, + { 0x160074, 1, 0x10, 0x924}, + { 0x160078, 2, 0x18, 0x924}, + { 0x160300, 8, 0x18, 0x924}, + { 0x160330, 6, 0x18, 0x924}, + { 0x160404, 6, 0x18, 0x924}, + { 0x16043c, 1, 0x10, 0x924}, + { 0x160440, 6, 0x18, 0x924}, + { 0x16045c, 6, 0x18, 0x924}, + { 0x160474, 1, 0x10, 0x924}, + { 0x160478, 2, 0x18, 0x924}, + { 0x160700, 8, 0x18, 0x924}, + { 0x160730, 6, 0x18, 0x924}, + { 0x161000, 7, 0x1f, 0x924}, + { 0x16102c, 2, 0x1f, 0x1fff}, + { 0x161038, 1, 0x1f, 0x1fff}, + { 0x16103c, 2, 0x1c, 0x924}, + { 0x161800, 2, 0x1f, 0x924}, + { 0x162000, 54, 0x18, 0x924}, + { 0x162200, 60, 0x18, 0x924}, + { 0x162400, 54, 0x18, 0x924}, + { 0x162600, 60, 0x18, 0x924}, + { 0x162800, 54, 0x18, 0x924}, + { 0x162a00, 60, 0x18, 0x924}, + { 0x162c00, 54, 0x18, 0x924}, + { 0x162e00, 60, 0x18, 0x924}, + { 0x163000, 1, 0x18, 0x924}, + { 0x163008, 1, 0x18, 0x924}, + { 0x163010, 1, 0x18, 0x924}, + { 0x163018, 1, 0x18, 0x924}, + { 0x163020, 5, 0x18, 0x924}, + { 0x163038, 3, 0x18, 0x924}, + { 0x163048, 3, 0x18, 0x924}, + { 0x163058, 1, 0x18, 0x924}, + { 0x163060, 1, 0x18, 0x924}, + { 0x163068, 1, 0x18, 0x924}, + { 0x163070, 3, 0x18, 0x924}, + { 0x163080, 1, 0x18, 0x924}, + { 0x163088, 3, 0x18, 0x924}, + { 0x163098, 1, 0x18, 0x924}, + { 0x1630a0, 1, 0x18, 0x924}, + { 0x1630a8, 1, 0x18, 0x924}, + { 0x1630b0, 2, 0x10, 0x924}, + { 0x1630c0, 1, 0x18, 0x924}, + { 0x1630c8, 1, 0x18, 0x924}, + { 0x1630d0, 1, 0x18, 0x924}, + { 0x1630d8, 1, 0x18, 0x924}, + { 0x1630e0, 2, 0x18, 0x924}, + { 0x163110, 1, 0x18, 0x924}, + { 0x163120, 2, 0x18, 0x924}, + { 0x163420, 4, 0x18, 0x924}, + { 0x163438, 2, 0x18, 0x924}, + { 0x163488, 2, 0x18, 0x924}, + { 0x163520, 2, 0x18, 0x924}, + { 0x163800, 1, 0x18, 0x924}, + { 0x163808, 1, 0x18, 0x924}, + { 0x163810, 1, 0x18, 0x924}, + { 0x163818, 1, 0x18, 0x924}, + { 0x163820, 5, 0x18, 0x924}, + { 0x163838, 3, 0x18, 0x924}, + { 0x163848, 3, 0x18, 0x924}, + { 0x163858, 1, 0x18, 0x924}, + { 0x163860, 1, 0x18, 0x924}, + { 0x163868, 1, 0x18, 0x924}, + { 0x163870, 3, 0x18, 0x924}, + { 0x163880, 1, 0x18, 0x924}, + { 0x163888, 3, 0x18, 0x924}, + { 0x163898, 1, 0x18, 0x924}, + { 0x1638a0, 1, 0x18, 0x924}, + { 0x1638a8, 1, 0x18, 0x924}, + { 0x1638b0, 2, 0x10, 0x924}, + { 0x1638c0, 1, 0x18, 0x924}, + { 0x1638c8, 1, 0x18, 0x924}, + { 0x1638d0, 1, 0x18, 0x924}, + { 0x1638d8, 1, 0x18, 0x924}, + { 0x1638e0, 2, 0x18, 0x924}, + { 0x163910, 1, 0x18, 0x924}, + { 0x163920, 2, 0x18, 0x924}, + { 0x163c20, 4, 0x18, 0x924}, + { 0x163c38, 2, 0x18, 0x924}, + { 0x163c88, 2, 0x18, 0x924}, + { 
0x163d20, 2, 0x18, 0x924}, + { 0x164000, 5, 0x1f, 0x924}, + { 0x164014, 2, 0x1f, 0xfff}, + { 0x16401c, 53, 0x1f, 0x924}, + { 0x164100, 2, 0x1f, 0x1fff}, + { 0x16410c, 1, 0x1f, 0x1fff}, + { 0x164110, 2, 0x1e, 0x924}, + { 0x164118, 15, 0x1c, 0x924}, + { 0x164200, 1, 0x1f, 0x924}, + { 0x164208, 1, 0x1f, 0x924}, + { 0x164210, 1, 0x1f, 0x924}, + { 0x164218, 1, 0x1f, 0x924}, + { 0x164220, 1, 0x1f, 0x924}, + { 0x164228, 1, 0x1f, 0x924}, + { 0x164230, 1, 0x1f, 0x924}, + { 0x164238, 1, 0x1f, 0x924}, + { 0x164240, 1, 0x1f, 0x924}, + { 0x164248, 1, 0x1f, 0x924}, + { 0x164250, 1, 0x1f, 0x924}, + { 0x164258, 1, 0x1f, 0x924}, + { 0x164260, 1, 0x1f, 0x924}, + { 0x164270, 2, 0x1f, 0x924}, + { 0x164280, 2, 0x1f, 0x924}, + { 0x164800, 2, 0x1f, 0x924}, + { 0x165000, 2, 0x1f, 0x924}, + { 0x166000, 164, 0x1f, 0x924}, + { 0x1662b0, 2, 0x1f, 0x1fff}, + { 0x1662bc, 1, 0x1f, 0x1fff}, + { 0x1662cc, 7, 0x1c, 0x924}, + { 0x166400, 49, 0x1f, 0x924}, + { 0x1664c8, 32, 0x1f, 0x924}, + { 0x166548, 1, 0x1f, 0xfff}, + { 0x16654c, 1, 0x1f, 0x924}, + { 0x166550, 1, 0x1f, 0xfff}, + { 0x166554, 1, 0x1f, 0x924}, + { 0x166558, 1, 0x1f, 0xfff}, + { 0x16655c, 1, 0x1f, 0x924}, + { 0x166568, 2, 0x1f, 0x924}, + { 0x166570, 5, 0x1c, 0x924}, + { 0x166800, 1, 0x1f, 0x924}, + { 0x168000, 1, 0x1f, 0xfff}, + { 0x168004, 1, 0x1f, 0x924}, + { 0x168008, 1, 0x1f, 0xfff}, + { 0x16800c, 1, 0x1f, 0x924}, + { 0x168010, 1, 0x1f, 0xfff}, + { 0x168014, 1, 0x1f, 0x924}, + { 0x168018, 1, 0x1f, 0xfff}, + { 0x16801c, 3, 0x1f, 0x924}, + { 0x168028, 2, 0x1f, 0xfff}, + { 0x168030, 10, 0x1f, 0x924}, + { 0x168058, 9, 0x1f, 0xfff}, + { 0x16807c, 106, 0x1f, 0x924}, + { 0x168224, 2, 0x3, 0x924}, + { 0x16822c, 3, 0x1f, 0x924}, + { 0x168238, 1, 0x1f, 0xfff}, + { 0x16823c, 25, 0x1f, 0x924}, + { 0x1682a0, 12, 0x3, 0x924}, + { 0x1682d0, 7, 0x1f, 0xfff}, + { 0x1682ec, 5, 0x1f, 0x924}, + { 0x168300, 2, 0x3, 0xfff}, + { 0x168308, 65, 0x1f, 0xfff}, + { 0x16840c, 1, 0x1f, 0x924}, + { 0x168410, 2, 0x1f, 0xfff}, + { 0x168418, 2, 0x3, 0x924}, + { 0x168420, 6, 0x1f, 0x924}, + { 0x168448, 2, 0x1f, 0x1fff}, + { 0x168454, 1, 0x1f, 0x1fff}, + { 0x168800, 19, 0x1f, 0x924}, + { 0x168900, 1, 0x1f, 0x924}, + { 0x168a00, 128, 0x1f, 0xfff}, + { 0x16a000, 1536, 0x1f, 0x924}, + { 0x16c000, 1536, 0x1f, 0x924}, + { 0x16e000, 16, 0x2, 0x924}, + { 0x16e040, 8, 0x1c, 0x924}, + { 0x16e100, 1, 0x2, 0x924}, + { 0x16e200, 2, 0x2, 0xfff}, + { 0x16e400, 1, 0x2, 0x924}, + { 0x16e404, 2, 0x2, 0xfff}, + { 0x16e40c, 94, 0x2, 0x924}, + { 0x16e584, 64, 0x2, 0xfff}, + { 0x16e684, 2, 0x1e, 0xfff}, + { 0x16e68c, 4, 0x2, 0xfff}, + { 0x16e69c, 8, 0x2, 0x924}, + { 0x16e6bc, 4, 0x1e, 0x924}, + { 0x16e6cc, 4, 0x2, 0x924}, + { 0x16e6e0, 2, 0x1c, 0x924}, + { 0x16e6e8, 5, 0xc, 0x924}, + { 0x16e6fc, 4, 0x1c, 0xfff}, + { 0x16e70c, 1, 0x1c, 0x924}, + { 0x16e768, 17, 0x1c, 0x924}, + { 0x16e7ac, 12, 0x10, 0xfff}, + { 0x170000, 24, 0x1f, 0x924}, + { 0x170060, 4, 0x3, 0x924}, + { 0x170070, 13, 0x1f, 0x924}, + { 0x1700a4, 1, 0x1f, 0xfff}, + { 0x1700a8, 1, 0x1f, 0x924}, + { 0x1700ac, 2, 0x1f, 0xfff}, + { 0x1700b4, 3, 0x1f, 0x924}, + { 0x1700c0, 1, 0x1f, 0xfff}, + { 0x1700c4, 44, 0x1f, 0x924}, + { 0x170184, 2, 0x1f, 0x1fff}, + { 0x170190, 1, 0x1f, 0x1fff}, + { 0x170194, 11, 0x1c, 0x924}, + { 0x1701c4, 1, 0x1c, 0x924}, + { 0x1701cc, 7, 0x1c, 0x924}, + { 0x1701e8, 1, 0x18, 0x924}, + { 0x1701ec, 1, 0x1c, 0x924}, + { 0x1701f4, 1, 0x1c, 0x924}, + { 0x170200, 4, 0x1f, 0x924}, + { 0x170214, 1, 0x1f, 0x924}, + { 0x170218, 77, 0x1c, 0x924}, + { 0x170400, 64, 0x1c, 0x924}, + { 0x178000, 1, 0x1f, 0x924}, + { 0x180000, 61, 0x1f, 0x924}, 
+ { 0x180114, 2, 0x1f, 0x1fff}, + { 0x180120, 3, 0x1f, 0x1fff}, + { 0x180130, 1, 0x1f, 0x1fff}, + { 0x18013c, 2, 0x1e, 0x924}, + { 0x180200, 27, 0x1f, 0x924}, + { 0x18026c, 1, 0x1f, 0xfff}, + { 0x180270, 12, 0x1f, 0x924}, + { 0x1802a0, 1, 0x1f, 0xfff}, + { 0x1802a4, 17, 0x1f, 0x924}, + { 0x180340, 4, 0x1f, 0x924}, + { 0x180380, 1, 0x1c, 0x924}, + { 0x180388, 1, 0x1c, 0x924}, + { 0x180390, 1, 0x1c, 0x924}, + { 0x180398, 1, 0x1c, 0x924}, + { 0x1803a0, 5, 0x1c, 0x924}, + { 0x1803b4, 2, 0x18, 0x924}, + { 0x180400, 256, 0x3, 0xfff}, + { 0x181000, 4, 0x1f, 0x93c}, + { 0x181010, 1020, 0x1f, 0x38}, + { 0x182000, 4, 0x18, 0x924}, + { 0x1a0000, 1, 0x1f, 0x92c}, + { 0x1a0004, 5631, 0x1f, 0x8}, + { 0x1a5800, 2560, 0x1e, 0x8}, + { 0x1a8000, 1, 0x1f, 0x92c}, + { 0x1a8004, 8191, 0x1e, 0x8}, + { 0x1b0000, 1, 0x1f, 0x92c}, + { 0x1b0004, 15, 0x2, 0x8}, + { 0x1b0040, 1, 0x1e, 0x92c}, + { 0x1b0044, 239, 0x2, 0x8}, + { 0x1b0400, 1, 0x1f, 0x92c}, + { 0x1b0404, 255, 0x2, 0x8}, + { 0x1b0800, 1, 0x1f, 0x924}, + { 0x1b0840, 1, 0x1e, 0x924}, + { 0x1b0c00, 1, 0x1f, 0x1fff}, + { 0x1b1000, 1, 0x1f, 0x1fff}, + { 0x1b1040, 1, 0x1e, 0x1fff}, + { 0x1b1400, 1, 0x1f, 0x924}, + { 0x1b1440, 1, 0x1e, 0x924}, + { 0x1b1480, 1, 0x1e, 0x924}, + { 0x1b14c0, 1, 0x1e, 0x924}, + { 0x1b1800, 128, 0x1f, 0x10}, + { 0x1b1c00, 128, 0x1f, 0x10}, + { 0x1b2000, 1, 0x1f, 0xdb6}, + { 0x1b2400, 1, 0x1e, 0x92c}, + { 0x1b2404, 5631, 0x1c, 0x8}, + { 0x1b8000, 1, 0x1f, 0xfff}, + { 0x1b8040, 1, 0x1f, 0xfff}, + { 0x1b8080, 1, 0x1f, 0xfff}, + { 0x1b80c0, 1, 0x1f, 0xfff}, + { 0x1b8100, 1, 0x1f, 0x924}, + { 0x1b8140, 1, 0x1f, 0x924}, + { 0x1b8180, 1, 0x1f, 0x924}, + { 0x1b81c0, 1, 0x1f, 0x924}, + { 0x1b8200, 1, 0x1f, 0x924}, + { 0x1b8240, 1, 0x1f, 0x924}, + { 0x1b8280, 1, 0x1f, 0x924}, + { 0x1b82c0, 1, 0x1f, 0x924}, + { 0x1b8300, 1, 0x1f, 0x924}, + { 0x1b8340, 1, 0x1f, 0x924}, + { 0x1b8380, 1, 0x1f, 0x924}, + { 0x1b83c0, 1, 0x1f, 0x924}, + { 0x1b8400, 1, 0x1f, 0x924}, + { 0x1b8440, 1, 0x1f, 0x924}, + { 0x1b8480, 1, 0x1f, 0x924}, + { 0x1b84c0, 1, 0x1f, 0x924}, + { 0x1b8500, 1, 0x1f, 0x924}, + { 0x1b8540, 1, 0x1f, 0x924}, + { 0x1b8580, 1, 0x1f, 0x924}, + { 0x1b85c0, 19, 0x1c, 0x924}, + { 0x1b8800, 1, 0x1f, 0x924}, + { 0x1b8840, 1, 0x1f, 0x924}, + { 0x1b8880, 1, 0x1f, 0x924}, + { 0x1b88c0, 1, 0x1f, 0x924}, + { 0x1b8900, 1, 0x1f, 0x924}, + { 0x1b8940, 1, 0x1f, 0x924}, + { 0x1b8980, 1, 0x1f, 0x924}, + { 0x1b89c0, 1, 0x1f, 0x924}, + { 0x1b8a00, 1, 0x1f, 0x934}, + { 0x1b8a40, 1, 0x1f, 0x924}, + { 0x1b8a80, 1, 0x1f, 0x492}, + { 0x1b8ac0, 1, 0x1f, 0x924}, + { 0x1b8b00, 1, 0x1f, 0x924}, + { 0x1b8b40, 1, 0x1f, 0x924}, + { 0x1b8b80, 1, 0x1f, 0x924}, + { 0x1b8bc0, 1, 0x1f, 0x924}, + { 0x1b8c00, 1, 0x1f, 0x924}, + { 0x1b8c40, 1, 0x1f, 0x924}, + { 0x1b8c80, 1, 0x1f, 0x924}, + { 0x1b8cc0, 1, 0x1f, 0x924}, + { 0x1b8cc4, 1, 0x1c, 0x924}, + { 0x1b8d00, 1, 0x1f, 0x924}, + { 0x1b8d40, 1, 0x1f, 0x924}, + { 0x1b8d80, 1, 0x1f, 0x924}, + { 0x1b8dc0, 1, 0x1f, 0x924}, + { 0x1b8e00, 1, 0x1f, 0x924}, + { 0x1b8e40, 1, 0x1f, 0x924}, + { 0x1b8e80, 1, 0x1f, 0x924}, + { 0x1b8e84, 1, 0x1c, 0x924}, + { 0x1b8ec0, 1, 0x1e, 0x924}, + { 0x1b8f00, 1, 0x1e, 0x924}, + { 0x1b8f40, 1, 0x1e, 0x924}, + { 0x1b8f80, 1, 0x1e, 0x924}, + { 0x1b8fc0, 1, 0x1e, 0x924}, + { 0x1b8fd4, 5, 0x1c, 0x924}, + { 0x1b8fe8, 2, 0x18, 0x924}, + { 0x1b9000, 1, 0x1c, 0x924}, + { 0x1b9040, 3, 0x1c, 0x924}, + { 0x1b905c, 1, 0x18, 0x924}, + { 0x1b9064, 1, 0x10, 0x924}, + { 0x1b9080, 10, 0x10, 0x924}, + { 0x1c0000, 2, 0x1f, 0x924}, + { 0x200000, 65, 0x1f, 0x924}, + { 0x200124, 2, 0x1f, 0x1fff}, + { 0x200130, 3, 0x1f, 0x1fff}, + 
{ 0x200140, 1, 0x1f, 0x1fff}, + { 0x20014c, 2, 0x1e, 0x924}, + { 0x200200, 27, 0x1f, 0x924}, + { 0x20026c, 1, 0x1f, 0xfff}, + { 0x200270, 12, 0x1f, 0x924}, + { 0x2002a0, 1, 0x1f, 0xfff}, + { 0x2002a4, 17, 0x1f, 0x924}, + { 0x200340, 4, 0x1f, 0x924}, + { 0x200380, 1, 0x1c, 0x924}, + { 0x200388, 1, 0x1c, 0x924}, + { 0x200390, 1, 0x1c, 0x924}, + { 0x200398, 1, 0x1c, 0x924}, + { 0x2003a0, 1, 0x1c, 0x924}, + { 0x2003a8, 2, 0x1c, 0x924}, + { 0x200400, 256, 0x3, 0xfff}, + { 0x202000, 4, 0x1f, 0x1927}, + { 0x202010, 2044, 0x1f, 0x1007}, + { 0x204000, 4, 0x18, 0x924}, + { 0x220000, 1, 0x1f, 0x925}, + { 0x220004, 5631, 0x1f, 0x1}, + { 0x225800, 2560, 0x1e, 0x1}, + { 0x228000, 1, 0x1f, 0x925}, + { 0x228004, 8191, 0x1e, 0x1}, + { 0x230000, 1, 0x1f, 0x925}, + { 0x230004, 15, 0x2, 0x1}, + { 0x230040, 1, 0x1e, 0x925}, + { 0x230044, 239, 0x2, 0x1}, + { 0x230400, 1, 0x1f, 0x925}, + { 0x230404, 255, 0x2, 0x1}, + { 0x230800, 1, 0x1f, 0x924}, + { 0x230840, 1, 0x1e, 0x924}, + { 0x230c00, 1, 0x1f, 0x924}, + { 0x231000, 1, 0x1f, 0x924}, + { 0x231040, 1, 0x1e, 0x924}, + { 0x231400, 1, 0x1f, 0x924}, + { 0x231440, 1, 0x1e, 0x924}, + { 0x231480, 1, 0x1e, 0x924}, + { 0x2314c0, 1, 0x1e, 0x924}, + { 0x231800, 128, 0x1f, 0x2}, + { 0x231c00, 128, 0x1f, 0x2}, + { 0x232000, 1, 0x1f, 0xdb6}, + { 0x232400, 1, 0x1e, 0x925}, + { 0x232404, 5631, 0x1c, 0x1}, + { 0x238000, 1, 0x1f, 0xfff}, + { 0x238040, 1, 0x1f, 0xfff}, + { 0x238080, 1, 0x1f, 0xfff}, + { 0x2380c0, 1, 0x1f, 0xfff}, + { 0x238100, 1, 0x1f, 0x924}, + { 0x238140, 1, 0x1f, 0x924}, + { 0x238180, 1, 0x1f, 0x924}, + { 0x2381c0, 1, 0x1f, 0x924}, + { 0x238200, 1, 0x1f, 0x924}, + { 0x238240, 1, 0x1f, 0x924}, + { 0x238280, 1, 0x1f, 0x924}, + { 0x2382c0, 1, 0x1f, 0x924}, + { 0x238300, 1, 0x1f, 0x924}, + { 0x238340, 1, 0x1f, 0x924}, + { 0x238380, 1, 0x1f, 0x924}, + { 0x2383c0, 1, 0x1f, 0x924}, + { 0x238400, 1, 0x1f, 0x924}, + { 0x238440, 1, 0x1f, 0x924}, + { 0x238480, 1, 0x1f, 0x924}, + { 0x2384c0, 1, 0x1f, 0x924}, + { 0x238500, 1, 0x1f, 0x924}, + { 0x238540, 1, 0x1f, 0x924}, + { 0x238580, 1, 0x1f, 0x924}, + { 0x2385c0, 19, 0x1c, 0x924}, + { 0x238800, 1, 0x1f, 0x924}, + { 0x238840, 1, 0x1f, 0x924}, + { 0x238880, 1, 0x1f, 0x924}, + { 0x2388c0, 1, 0x1f, 0x924}, + { 0x238900, 1, 0x1f, 0x924}, + { 0x238940, 1, 0x1f, 0x924}, + { 0x238980, 1, 0x1f, 0x924}, + { 0x2389c0, 1, 0x1f, 0x924}, + { 0x238a00, 1, 0x1f, 0x926}, + { 0x238a40, 1, 0x1f, 0x924}, + { 0x238a80, 1, 0x1f, 0x492}, + { 0x238ac0, 1, 0x1f, 0x924}, + { 0x238b00, 1, 0x1f, 0x924}, + { 0x238b40, 1, 0x1f, 0x924}, + { 0x238b80, 1, 0x1f, 0x924}, + { 0x238bc0, 1, 0x1f, 0x924}, + { 0x238c00, 1, 0x1f, 0x924}, + { 0x238c40, 1, 0x1f, 0x924}, + { 0x238c80, 1, 0x1f, 0x924}, + { 0x238cc0, 1, 0x1f, 0x924}, + { 0x238cc4, 1, 0x1c, 0x924}, + { 0x238d00, 1, 0x1f, 0x924}, + { 0x238d40, 1, 0x1f, 0x924}, + { 0x238d80, 1, 0x1f, 0x924}, + { 0x238dc0, 1, 0x1f, 0x924}, + { 0x238e00, 1, 0x1f, 0x924}, + { 0x238e40, 1, 0x1f, 0x924}, + { 0x238e80, 1, 0x1f, 0x924}, + { 0x238e84, 1, 0x1c, 0x924}, + { 0x238ec0, 1, 0x1e, 0x924}, + { 0x238f00, 1, 0x1e, 0x924}, + { 0x238f40, 1, 0x1e, 0x924}, + { 0x238f80, 1, 0x1e, 0x924}, + { 0x238fc0, 1, 0x1e, 0x924}, + { 0x238fd4, 5, 0x1c, 0x924}, + { 0x238fe8, 2, 0x18, 0x924}, + { 0x239000, 1, 0x1c, 0x924}, + { 0x239040, 3, 0x1c, 0x924}, + { 0x23905c, 1, 0x18, 0x924}, + { 0x239064, 1, 0x10, 0x924}, + { 0x239080, 10, 0x10, 0x924}, + { 0x240000, 2, 0x1f, 0x924}, + { 0x280000, 65, 0x1f, 0x924}, + { 0x280124, 2, 0x1f, 0x1fff}, + { 0x280130, 3, 0x1f, 0x1fff}, + { 0x280140, 1, 0x1f, 0x1fff}, + { 0x28014c, 2, 0x1e, 0x924}, + { 
0x280200, 27, 0x1f, 0x924}, + { 0x28026c, 1, 0x1f, 0xfff}, + { 0x280270, 12, 0x1f, 0x924}, + { 0x2802a0, 1, 0x1f, 0xfff}, + { 0x2802a4, 17, 0x1f, 0x924}, + { 0x280340, 4, 0x1f, 0x924}, + { 0x280380, 1, 0x1c, 0x924}, + { 0x280388, 1, 0x1c, 0x924}, + { 0x280390, 1, 0x1c, 0x924}, + { 0x280398, 1, 0x1c, 0x924}, + { 0x2803a0, 1, 0x1c, 0x924}, + { 0x2803a8, 2, 0x1c, 0x924}, + { 0x280400, 256, 0x3, 0xfff}, + { 0x282000, 4, 0x1f, 0x9e4}, + { 0x282010, 2044, 0x1f, 0x1c0}, + { 0x284000, 4, 0x18, 0x924}, + { 0x2a0000, 1, 0x1f, 0x964}, + { 0x2a0004, 5631, 0x1f, 0x40}, + { 0x2a5800, 2560, 0x1e, 0x40}, + { 0x2a8000, 1, 0x1f, 0x964}, + { 0x2a8004, 8191, 0x1e, 0x40}, + { 0x2b0000, 1, 0x1f, 0x964}, + { 0x2b0004, 15, 0x2, 0x40}, + { 0x2b0040, 1, 0x1e, 0x964}, + { 0x2b0044, 239, 0x2, 0x40}, + { 0x2b0400, 1, 0x1f, 0x964}, + { 0x2b0404, 255, 0x2, 0x40}, + { 0x2b0800, 1, 0x1f, 0x924}, + { 0x2b0840, 1, 0x1e, 0x924}, + { 0x2b0c00, 1, 0x1f, 0x924}, + { 0x2b1000, 1, 0x1f, 0x924}, + { 0x2b1040, 1, 0x1e, 0x924}, + { 0x2b1400, 1, 0x1f, 0x924}, + { 0x2b1440, 1, 0x1e, 0x924}, + { 0x2b1480, 1, 0x1e, 0x924}, + { 0x2b14c0, 1, 0x1e, 0x924}, + { 0x2b1800, 128, 0x1f, 0x80}, + { 0x2b1c00, 128, 0x1f, 0x80}, + { 0x2b2000, 1, 0x1f, 0xdb6}, + { 0x2b2400, 1, 0x1e, 0x964}, + { 0x2b2404, 5631, 0x1c, 0x40}, + { 0x2b8000, 1, 0x1f, 0xfff}, + { 0x2b8040, 1, 0x1f, 0xfff}, + { 0x2b8080, 1, 0x1f, 0xfff}, + { 0x2b80c0, 1, 0x1f, 0x924}, + { 0x2b8100, 1, 0x1f, 0x924}, + { 0x2b8140, 1, 0x1f, 0x924}, + { 0x2b8180, 1, 0x1f, 0x924}, + { 0x2b81c0, 1, 0x1f, 0x924}, + { 0x2b8200, 1, 0x1f, 0x924}, + { 0x2b8240, 1, 0x1f, 0x924}, + { 0x2b8280, 1, 0x1f, 0x924}, + { 0x2b82c0, 1, 0x1f, 0x924}, + { 0x2b8300, 1, 0x1f, 0x924}, + { 0x2b8340, 1, 0x1f, 0x924}, + { 0x2b8380, 1, 0x1f, 0x924}, + { 0x2b83c0, 1, 0x1f, 0x924}, + { 0x2b8400, 1, 0x1f, 0x924}, + { 0x2b8440, 1, 0x1f, 0x924}, + { 0x2b8480, 1, 0x1f, 0x924}, + { 0x2b84c0, 1, 0x1f, 0x924}, + { 0x2b8500, 1, 0x1f, 0x924}, + { 0x2b8540, 1, 0x1f, 0x924}, + { 0x2b8580, 1, 0x1f, 0x924}, + { 0x2b85c0, 19, 0x1c, 0x924}, + { 0x2b8800, 1, 0x1f, 0x924}, + { 0x2b8840, 1, 0x1f, 0x924}, + { 0x2b8880, 1, 0x1f, 0x924}, + { 0x2b88c0, 1, 0x1f, 0x924}, + { 0x2b8900, 1, 0x1f, 0x924}, + { 0x2b8940, 1, 0x1f, 0x924}, + { 0x2b8980, 1, 0x1f, 0x924}, + { 0x2b89c0, 1, 0x1f, 0x924}, + { 0x2b8a00, 1, 0x1f, 0x9a4}, + { 0x2b8a40, 1, 0x1f, 0x924}, + { 0x2b8a80, 1, 0x1f, 0x492}, + { 0x2b8ac0, 1, 0x1f, 0x924}, + { 0x2b8b00, 1, 0x1f, 0x924}, + { 0x2b8b40, 1, 0x1f, 0x924}, + { 0x2b8b80, 1, 0x1f, 0x924}, + { 0x2b8bc0, 1, 0x1f, 0x924}, + { 0x2b8c00, 1, 0x1f, 0x924}, + { 0x2b8c40, 1, 0x1f, 0x924}, + { 0x2b8c80, 1, 0x1f, 0x924}, + { 0x2b8cc0, 1, 0x1f, 0x924}, + { 0x2b8cc4, 1, 0x1c, 0x924}, + { 0x2b8d00, 1, 0x1f, 0x924}, + { 0x2b8d40, 1, 0x1f, 0x924}, + { 0x2b8d80, 1, 0x1f, 0x924}, + { 0x2b8dc0, 1, 0x1f, 0x924}, + { 0x2b8e00, 1, 0x1f, 0x924}, + { 0x2b8e40, 1, 0x1f, 0x924}, + { 0x2b8e80, 1, 0x1f, 0x924}, + { 0x2b8e84, 1, 0x1c, 0x924}, + { 0x2b8ec0, 1, 0x1e, 0x924}, + { 0x2b8f00, 1, 0x1e, 0x924}, + { 0x2b8f40, 1, 0x1e, 0x924}, + { 0x2b8f80, 1, 0x1e, 0x924}, + { 0x2b8fc0, 1, 0x1e, 0x924}, + { 0x2b8fd4, 5, 0x1c, 0x924}, + { 0x2b8fe8, 2, 0x18, 0x924}, + { 0x2b9000, 1, 0x1c, 0x924}, + { 0x2b9040, 3, 0x1c, 0x924}, + { 0x2b905c, 1, 0x18, 0x924}, + { 0x2b9064, 1, 0x10, 0x924}, + { 0x2b9080, 10, 0x10, 0x924}, + { 0x2c0000, 2, 0x1f, 0x1fff}, + { 0x300000, 65, 0x1f, 0x924}, + { 0x300124, 2, 0x1f, 0x1fff}, + { 0x300130, 3, 0x1f, 0x1fff}, + { 0x300140, 1, 0x1f, 0x1fff}, + { 0x30014c, 2, 0x1e, 0x924}, + { 0x300200, 27, 0x1f, 0x924}, + { 0x30026c, 1, 0x1f, 0xfff}, 
+ { 0x300270, 12, 0x1f, 0x924}, + { 0x3002a0, 1, 0x1f, 0xfff}, + { 0x3002a4, 17, 0x1f, 0x924}, + { 0x300340, 4, 0x1f, 0x924}, + { 0x300380, 1, 0x1c, 0x924}, + { 0x300388, 1, 0x1c, 0x924}, + { 0x300390, 1, 0x1c, 0x924}, + { 0x300398, 1, 0x1c, 0x924}, + { 0x3003a0, 1, 0x1c, 0x924}, + { 0x3003a8, 2, 0x1c, 0x924}, + { 0x300400, 256, 0x3, 0xfff}, + { 0x302000, 4, 0x1f, 0xf24}, + { 0x302010, 2044, 0x1f, 0xe00}, + { 0x304000, 4, 0x18, 0x924}, + { 0x320000, 1, 0x1f, 0xb24}, + { 0x320004, 5631, 0x1f, 0x200}, + { 0x325800, 2560, 0x1e, 0x200}, + { 0x328000, 1, 0x1f, 0xb24}, + { 0x328004, 8191, 0x1e, 0x200}, + { 0x330000, 1, 0x1f, 0xb24}, + { 0x330004, 15, 0x2, 0x200}, + { 0x330040, 1, 0x1e, 0xb24}, + { 0x330044, 239, 0x2, 0x200}, + { 0x330400, 1, 0x1f, 0xb24}, + { 0x330404, 255, 0x2, 0x200}, + { 0x330800, 1, 0x1f, 0x924}, + { 0x330840, 1, 0x1e, 0x924}, + { 0x330c00, 1, 0x1f, 0x924}, + { 0x331000, 1, 0x1f, 0x924}, + { 0x331040, 1, 0x1e, 0x924}, + { 0x331400, 1, 0x1f, 0x924}, + { 0x331440, 1, 0x1e, 0x924}, + { 0x331480, 1, 0x1e, 0x924}, + { 0x3314c0, 1, 0x1e, 0x924}, + { 0x331800, 128, 0x1f, 0x400}, + { 0x331c00, 128, 0x1f, 0x400}, + { 0x332000, 1, 0x1f, 0xdb6}, + { 0x332400, 1, 0x1e, 0xb24}, + { 0x332404, 5631, 0x1c, 0x200}, + { 0x338000, 1, 0x1f, 0xfff}, + { 0x338040, 1, 0x1f, 0xfff}, + { 0x338080, 1, 0x1f, 0xfff}, + { 0x3380c0, 1, 0x1f, 0xfff}, + { 0x338100, 1, 0x1f, 0x924}, + { 0x338140, 1, 0x1f, 0x924}, + { 0x338180, 1, 0x1f, 0x924}, + { 0x3381c0, 1, 0x1f, 0x924}, + { 0x338200, 1, 0x1f, 0x924}, + { 0x338240, 1, 0x1f, 0x924}, + { 0x338280, 1, 0x1f, 0x924}, + { 0x3382c0, 1, 0x1f, 0x924}, + { 0x338300, 1, 0x1f, 0x924}, + { 0x338340, 1, 0x1f, 0x924}, + { 0x338380, 1, 0x1f, 0x924}, + { 0x3383c0, 1, 0x1f, 0x924}, + { 0x338400, 1, 0x1f, 0x924}, + { 0x338440, 1, 0x1f, 0x924}, + { 0x338480, 1, 0x1f, 0x924}, + { 0x3384c0, 1, 0x1f, 0x924}, + { 0x338500, 1, 0x1f, 0x924}, + { 0x338540, 1, 0x1f, 0x924}, + { 0x338580, 1, 0x1f, 0x924}, + { 0x3385c0, 19, 0x1c, 0x924}, + { 0x338800, 1, 0x1f, 0x924}, + { 0x338840, 1, 0x1f, 0x924}, + { 0x338880, 1, 0x1f, 0x924}, + { 0x3388c0, 1, 0x1f, 0x924}, + { 0x338900, 1, 0x1f, 0x924}, + { 0x338940, 1, 0x1f, 0x924}, + { 0x338980, 1, 0x1f, 0x924}, + { 0x3389c0, 1, 0x1f, 0x924}, + { 0x338a00, 1, 0x1f, 0xd24}, + { 0x338a40, 1, 0x1f, 0x924}, + { 0x338a80, 1, 0x1f, 0x492}, + { 0x338ac0, 1, 0x1f, 0x924}, + { 0x338b00, 1, 0x1f, 0x924}, + { 0x338b40, 1, 0x1f, 0x924}, + { 0x338b80, 1, 0x1f, 0x924}, + { 0x338bc0, 1, 0x1f, 0x924}, + { 0x338c00, 1, 0x1f, 0x924}, + { 0x338c40, 1, 0x1f, 0x924}, + { 0x338c80, 1, 0x1f, 0x924}, + { 0x338cc0, 1, 0x1f, 0x924}, + { 0x338cc4, 1, 0x1c, 0x924}, + { 0x338d00, 1, 0x1f, 0x924}, + { 0x338d40, 1, 0x1f, 0x924}, + { 0x338d80, 1, 0x1f, 0x924}, + { 0x338dc0, 1, 0x1f, 0x924}, + { 0x338e00, 1, 0x1f, 0x924}, + { 0x338e40, 1, 0x1f, 0x924}, + { 0x338e80, 1, 0x1f, 0x924}, + { 0x338e84, 1, 0x1c, 0x924}, + { 0x338ec0, 1, 0x1e, 0x924}, + { 0x338f00, 1, 0x1e, 0x924}, + { 0x338f40, 1, 0x1e, 0x924}, + { 0x338f80, 1, 0x1e, 0x924}, + { 0x338fc0, 1, 0x1e, 0x924}, + { 0x338fd4, 5, 0x1c, 0x924}, + { 0x338fe8, 2, 0x18, 0x924}, + { 0x339000, 1, 0x1c, 0x924}, + { 0x339040, 3, 0x1c, 0x924}, + { 0x33905c, 1, 0x18, 0x924}, + { 0x339064, 1, 0x10, 0x924}, + { 0x339080, 10, 0x10, 0x924}, + { 0x340000, 2, 0x1f, 0x924}, + { 0x3a0000, 40960, 0x1c, 0x1000} }; -#define REGS_COUNT ARRAY_SIZE(reg_addrs) -static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a }; +#define REGS_COUNT ARRAY_SIZE(reg_addrs) -static const u32 page_vals_e2[] = { 0, 128 }; -#define 
PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2) +static const struct reg_addr idle_reg_addrs[] = { + { 0x2104, 1, 0x1f, 0xfff}, + { 0x2110, 2, 0x1f, 0xfff}, + { 0x211c, 8, 0x1f, 0xfff}, + { 0x2814, 1, 0x1f, 0xfff}, + { 0x281c, 2, 0x1f, 0xfff}, + { 0x2854, 1, 0x1f, 0xfff}, + { 0x285c, 1, 0x1f, 0xfff}, + { 0x3040, 1, 0x1f, 0xfff}, + { 0x9010, 7, 0x1c, 0xfff}, + { 0x9030, 1, 0x1c, 0xfff}, + { 0x9068, 16, 0x1c, 0xfff}, + { 0x9230, 2, 0x1c, 0xfff}, + { 0x9244, 1, 0x1c, 0xfff}, + { 0x9298, 1, 0x1c, 0xfff}, + { 0x92a8, 1, 0x1c, 0x1fff}, + { 0xa38c, 1, 0x1f, 0x1fff}, + { 0xa3c4, 1, 0x1e, 0xfff}, + { 0xa404, 1, 0x1f, 0xfff}, + { 0xa408, 2, 0x1f, 0x1fff}, + { 0xa42c, 12, 0x1f, 0xfff}, + { 0xa580, 1, 0x1f, 0x1fff}, + { 0xa590, 1, 0x1f, 0x1fff}, + { 0xa600, 5, 0x1e, 0xfff}, + { 0xa618, 1, 0x1e, 0xfff}, + { 0xa714, 1, 0x1c, 0xfff}, + { 0xa720, 1, 0x1c, 0xfff}, + { 0xa750, 1, 0x1c, 0xfff}, + { 0xc09c, 1, 0x3, 0xfff}, + { 0x103b0, 1, 0x1f, 0xfff}, + { 0x103c0, 1, 0x1f, 0xfff}, + { 0x103d0, 1, 0x3, 0x1fff}, + { 0x10418, 1, 0x1f, 0xfff}, + { 0x10420, 1, 0x1f, 0xfff}, + { 0x10428, 1, 0x1f, 0xfff}, + { 0x10460, 1, 0x1f, 0xfff}, + { 0x10474, 1, 0x1f, 0xfff}, + { 0x104e0, 1, 0x1f, 0xfff}, + { 0x104ec, 1, 0x1f, 0xfff}, + { 0x104f8, 1, 0x1f, 0xfff}, + { 0x10508, 1, 0x1f, 0xfff}, + { 0x10530, 1, 0x1f, 0xfff}, + { 0x10538, 1, 0x1f, 0xfff}, + { 0x10548, 1, 0x1f, 0xfff}, + { 0x10558, 1, 0x1f, 0xfff}, + { 0x182a8, 1, 0x1c, 0xfff}, + { 0x182b8, 1, 0x1c, 0xfff}, + { 0x18308, 1, 0x1c, 0xfff}, + { 0x18318, 1, 0x1c, 0xfff}, + { 0x18338, 1, 0x1c, 0xfff}, + { 0x18348, 1, 0x1c, 0xfff}, + { 0x183bc, 1, 0x1c, 0x1fff}, + { 0x183cc, 1, 0x1c, 0x1fff}, + { 0x18570, 1, 0x18, 0xfff}, + { 0x18578, 1, 0x18, 0xfff}, + { 0x1858c, 1, 0x18, 0xfff}, + { 0x18594, 1, 0x18, 0xfff}, + { 0x1862c, 4, 0x10, 0xfff}, + { 0x2021c, 11, 0x1f, 0xfff}, + { 0x202a8, 1, 0x1f, 0xfff}, + { 0x202b8, 1, 0x1f, 0x1fff}, + { 0x20404, 1, 0x1f, 0xfff}, + { 0x2040c, 2, 0x1f, 0xfff}, + { 0x2041c, 2, 0x1f, 0xfff}, + { 0x40154, 14, 0x1f, 0xfff}, + { 0x40198, 1, 0x1f, 0x1fff}, + { 0x404ac, 1, 0x1f, 0xfff}, + { 0x404bc, 1, 0x1f, 0x1fff}, + { 0x42290, 1, 0x1f, 0xfff}, + { 0x422a0, 1, 0x1f, 0xfff}, + { 0x422b0, 1, 0x1f, 0x1fff}, + { 0x42548, 1, 0x1f, 0xfff}, + { 0x42550, 1, 0x1f, 0xfff}, + { 0x42558, 1, 0x1f, 0xfff}, + { 0x50160, 8, 0x1f, 0xfff}, + { 0x501d0, 1, 0x1f, 0xfff}, + { 0x501e0, 1, 0x1f, 0x1fff}, + { 0x50204, 1, 0x1f, 0xfff}, + { 0x5020c, 2, 0x1f, 0xfff}, + { 0x5021c, 1, 0x1f, 0xfff}, + { 0x60090, 1, 0x1f, 0xfff}, + { 0x6011c, 1, 0x1f, 0xfff}, + { 0x6012c, 1, 0x1f, 0x1fff}, + { 0xc101c, 1, 0x1f, 0xfff}, + { 0xc102c, 1, 0x1f, 0x1fff}, + { 0xc2290, 1, 0x1f, 0xfff}, + { 0xc22a0, 1, 0x1f, 0xfff}, + { 0xc22b0, 1, 0x1f, 0x1fff}, + { 0xc2548, 1, 0x1f, 0xfff}, + { 0xc2550, 1, 0x1f, 0xfff}, + { 0xc2558, 1, 0x1f, 0xfff}, + { 0xc4294, 1, 0x1f, 0xfff}, + { 0xc42a4, 1, 0x1f, 0xfff}, + { 0xc42b4, 1, 0x1f, 0x1fff}, + { 0xc4550, 1, 0x1f, 0xfff}, + { 0xc4558, 1, 0x1f, 0xfff}, + { 0xc4560, 1, 0x1f, 0xfff}, + { 0xd016c, 8, 0x1f, 0xfff}, + { 0xd01d8, 1, 0x1f, 0xfff}, + { 0xd01e8, 1, 0x1f, 0x1fff}, + { 0xd0204, 1, 0x1f, 0xfff}, + { 0xd020c, 3, 0x1f, 0xfff}, + { 0xe0154, 8, 0x1f, 0xfff}, + { 0xe01c8, 1, 0x1f, 0xfff}, + { 0xe01d8, 1, 0x1f, 0x1fff}, + { 0xe0204, 1, 0x1f, 0xfff}, + { 0xe020c, 2, 0x1f, 0xfff}, + { 0xe021c, 2, 0x1f, 0xfff}, + { 0x101014, 1, 0x1f, 0xfff}, + { 0x101030, 1, 0x1f, 0xfff}, + { 0x101040, 1, 0x1f, 0x1fff}, + { 0x102058, 1, 0x1f, 0x1fff}, + { 0x102080, 16, 0x1f, 0xfff}, + { 0x103004, 2, 0x1f, 0xfff}, + { 0x103068, 1, 0x1f, 0xfff}, + { 0x103078, 1, 0x1f, 0xfff}, + { 
0x103088, 1, 0x1f, 0x1fff}, + { 0x10309c, 2, 0x1e, 0xfff}, + { 0x1030b8, 2, 0x1c, 0xfff}, + { 0x1030cc, 1, 0x1c, 0xfff}, + { 0x1030e0, 1, 0x1c, 0xfff}, + { 0x104004, 1, 0x1f, 0xfff}, + { 0x104018, 1, 0x1f, 0xfff}, + { 0x104020, 1, 0x1f, 0xfff}, + { 0x10403c, 1, 0x1f, 0xfff}, + { 0x1040fc, 1, 0x1f, 0xfff}, + { 0x10410c, 1, 0x1f, 0x1fff}, + { 0x104400, 1, 0x1f, 0x1fff}, + { 0x104404, 63, 0x1f, 0xfff}, + { 0x104800, 1, 0x1f, 0x1fff}, + { 0x104804, 63, 0x1f, 0xfff}, + { 0x105000, 4, 0x1f, 0x1fff}, + { 0x105010, 252, 0x1f, 0xfff}, + { 0x108094, 1, 0x3, 0xfff}, + { 0x1201b0, 2, 0x1f, 0xfff}, + { 0x12032c, 1, 0x1f, 0xfff}, + { 0x12036c, 3, 0x1f, 0xfff}, + { 0x120408, 2, 0x1f, 0xfff}, + { 0x120414, 15, 0x1f, 0xfff}, + { 0x120478, 2, 0x1f, 0xfff}, + { 0x12052c, 1, 0x1f, 0xfff}, + { 0x120564, 3, 0x1f, 0xfff}, + { 0x12057c, 1, 0x1f, 0x1fff}, + { 0x12058c, 1, 0x1f, 0x1fff}, + { 0x120608, 1, 0x1e, 0xfff}, + { 0x120748, 1, 0x1c, 0xfff}, + { 0x120778, 2, 0x1c, 0xfff}, + { 0x120808, 3, 0x1f, 0xfff}, + { 0x120818, 1, 0x1f, 0xfff}, + { 0x120820, 1, 0x1f, 0xfff}, + { 0x120828, 1, 0x1f, 0xfff}, + { 0x120830, 1, 0x1f, 0xfff}, + { 0x120838, 1, 0x1f, 0xfff}, + { 0x120840, 1, 0x1f, 0xfff}, + { 0x120848, 1, 0x1f, 0xfff}, + { 0x120850, 1, 0x1f, 0xfff}, + { 0x120858, 1, 0x1f, 0xfff}, + { 0x120860, 1, 0x1f, 0xfff}, + { 0x120868, 1, 0x1f, 0xfff}, + { 0x120870, 1, 0x1f, 0xfff}, + { 0x120878, 1, 0x1f, 0xfff}, + { 0x120880, 1, 0x1f, 0xfff}, + { 0x120888, 1, 0x1f, 0xfff}, + { 0x120890, 1, 0x1f, 0xfff}, + { 0x120898, 1, 0x1f, 0xfff}, + { 0x1208a0, 1, 0x1f, 0xfff}, + { 0x1208a8, 1, 0x1f, 0xfff}, + { 0x1208b0, 1, 0x1f, 0xfff}, + { 0x1208b8, 1, 0x1f, 0xfff}, + { 0x1208c0, 1, 0x1f, 0xfff}, + { 0x1208c8, 1, 0x1f, 0xfff}, + { 0x1208d0, 1, 0x1f, 0xfff}, + { 0x1208d8, 1, 0x1f, 0xfff}, + { 0x1208e0, 1, 0x1f, 0xfff}, + { 0x1208e8, 1, 0x1f, 0xfff}, + { 0x1208f0, 1, 0x1f, 0xfff}, + { 0x1208f8, 1, 0x1f, 0xfff}, + { 0x120900, 1, 0x1f, 0xfff}, + { 0x120908, 1, 0x1f, 0xfff}, + { 0x130030, 1, 0x1c, 0xfff}, + { 0x13004c, 3, 0x1c, 0xfff}, + { 0x130064, 2, 0x1c, 0xfff}, + { 0x13009c, 1, 0x1c, 0x1fff}, + { 0x130130, 1, 0x1c, 0xfff}, + { 0x13016c, 1, 0x1c, 0xfff}, + { 0x130300, 1, 0x1c, 0xfff}, + { 0x130480, 1, 0x1c, 0xfff}, + { 0x14005c, 2, 0xf, 0xfff}, + { 0x1400d0, 2, 0xf, 0xfff}, + { 0x1400e0, 1, 0xf, 0xfff}, + { 0x1401c8, 1, 0xf, 0xfff}, + { 0x140200, 6, 0xf, 0xfff}, + { 0x140338, 7, 0x10, 0xfff}, + { 0x140370, 7, 0x10, 0xfff}, + { 0x15c1bc, 6, 0x10, 0xfff}, + { 0x15c230, 7, 0x10, 0xfff}, + { 0x16101c, 1, 0x1f, 0xfff}, + { 0x16102c, 1, 0x1f, 0x1fff}, + { 0x164014, 2, 0x1f, 0xfff}, + { 0x1640f0, 1, 0x1f, 0xfff}, + { 0x166290, 1, 0x1f, 0xfff}, + { 0x1662a0, 1, 0x1f, 0xfff}, + { 0x1662b0, 1, 0x1f, 0x1fff}, + { 0x166548, 1, 0x1f, 0xfff}, + { 0x166550, 1, 0x1f, 0xfff}, + { 0x166558, 1, 0x1f, 0xfff}, + { 0x168000, 1, 0x1f, 0xfff}, + { 0x168008, 1, 0x1f, 0xfff}, + { 0x168010, 1, 0x1f, 0xfff}, + { 0x168018, 1, 0x1f, 0xfff}, + { 0x168028, 2, 0x1f, 0xfff}, + { 0x168058, 9, 0x1f, 0xfff}, + { 0x168238, 1, 0x1f, 0xfff}, + { 0x1682d0, 7, 0x1f, 0xfff}, + { 0x168300, 2, 0x3, 0xfff}, + { 0x168308, 65, 0x1f, 0xfff}, + { 0x168410, 2, 0x1f, 0xfff}, + { 0x168438, 1, 0x1f, 0xfff}, + { 0x168448, 1, 0x1f, 0x1fff}, + { 0x168a00, 128, 0x1f, 0xfff}, + { 0x16e200, 128, 0x2, 0xfff}, + { 0x16e404, 2, 0x2, 0xfff}, + { 0x16e584, 64, 0x2, 0xfff}, + { 0x16e684, 2, 0x1e, 0xfff}, + { 0x16e68c, 4, 0x2, 0xfff}, + { 0x16e6fc, 4, 0x1c, 0xfff}, + { 0x16e7ac, 12, 0x10, 0xfff}, + { 0x1700a4, 1, 0x1f, 0xfff}, + { 0x1700ac, 2, 0x1f, 0xfff}, + { 0x1700c0, 1, 0x1f, 0xfff}, + { 
0x170174, 1, 0x1f, 0xfff}, + { 0x170184, 1, 0x1f, 0x1fff}, + { 0x1800f4, 1, 0x1f, 0xfff}, + { 0x180104, 1, 0x1f, 0xfff}, + { 0x180114, 1, 0x1f, 0x1fff}, + { 0x180124, 1, 0x1f, 0x1fff}, + { 0x18026c, 1, 0x1f, 0xfff}, + { 0x1802a0, 1, 0x1f, 0xfff}, + { 0x1b8000, 1, 0x1f, 0xfff}, + { 0x1b8040, 1, 0x1f, 0xfff}, + { 0x1b8080, 1, 0x1f, 0xfff}, + { 0x1b80c0, 1, 0x1f, 0xfff}, + { 0x200104, 1, 0x1f, 0xfff}, + { 0x200114, 1, 0x1f, 0xfff}, + { 0x200124, 1, 0x1f, 0x1fff}, + { 0x200134, 1, 0x1f, 0x1fff}, + { 0x20026c, 1, 0x1f, 0xfff}, + { 0x2002a0, 1, 0x1f, 0xfff}, + { 0x238000, 1, 0x1f, 0xfff}, + { 0x238040, 1, 0x1f, 0xfff}, + { 0x238080, 1, 0x1f, 0xfff}, + { 0x2380c0, 1, 0x1f, 0xfff}, + { 0x280104, 1, 0x1f, 0xfff}, + { 0x280114, 1, 0x1f, 0xfff}, + { 0x280124, 1, 0x1f, 0x1fff}, + { 0x280134, 1, 0x1f, 0x1fff}, + { 0x28026c, 1, 0x1f, 0xfff}, + { 0x2802a0, 1, 0x1f, 0xfff}, + { 0x2b8000, 1, 0x1f, 0xfff}, + { 0x2b8040, 1, 0x1f, 0xfff}, + { 0x2b8080, 1, 0x1f, 0xfff}, + { 0x300104, 1, 0x1f, 0xfff}, + { 0x300114, 1, 0x1f, 0xfff}, + { 0x300124, 1, 0x1f, 0x1fff}, + { 0x300134, 1, 0x1f, 0x1fff}, + { 0x30026c, 1, 0x1f, 0xfff}, + { 0x3002a0, 1, 0x1f, 0xfff}, + { 0x338000, 1, 0x1f, 0xfff}, + { 0x338040, 1, 0x1f, 0xfff}, + { 0x338080, 1, 0x1f, 0xfff}, + { 0x3380c0, 1, 0x1f, 0xfff} +}; -static const u32 page_write_regs_e2[] = { 328476 }; -#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2) +#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs) -static const struct reg_addr page_read_regs_e2[] = { - { 0x58000, 4608, RI_E2_ONLINE } }; -#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2) +static const u32 read_reg_e1[] = { + 0x1b1000}; -static const u32 page_vals_e3[] = { 0, 128 }; -#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3) +static const struct wreg_addr wreg_addr_e1 = { + 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff}; -static const u32 page_write_regs_e3[] = { 328476 }; -#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3) +static const u32 read_reg_e1h[] = { + 0x1b1040, 0x1b1000}; -static const struct reg_addr page_read_regs_e3[] = { - { 0x58000, 4608, RI_E3E3B0_ONLINE } }; -#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3) +static const struct wreg_addr wreg_addr_e1h = { + 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff}; + +static const u32 read_reg_e2[] = { + 0x1b1040, 0x1b1000}; + +static const struct wreg_addr wreg_addr_e2 = { + 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff}; -#endif /* BNX2X_DUMP_H */ +static const u32 read_reg_e3[] = { + 0x1b1040, 0x1b1000}; + +static const struct wreg_addr wreg_addr_e3 = { + 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff}; + +static const u32 read_reg_e3b0[] = { + 0x1b1040, 0x1b1000}; + +static const struct wreg_addr wreg_addr_e3b0 = { + 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff}; + +static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = { + {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812, + 26247, 35655, 19074}, + {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804, + 26977, 40957, 35895}, + {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557, + 25608, 41377, 43903}, + {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269, + 25616, 42067, 43903}, + {45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332, + 25679, 42482, 43903} +}; +#endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index a427b49a886c..9a674b14b403 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1,6 +1,6 @@ /* bnx2x_ethtool.c: Broadcom Everest network driver. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -186,6 +186,7 @@ static const struct { }; #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) + static int bnx2x_get_port_type(struct bnx2x *bp) { int port_type; @@ -233,7 +234,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && !(bp->flags & MF_FUNC_DIS)) { - cmd->duplex = bp->link_vars.duplex; + cmd->duplex = bp->link_vars.duplex; if (IS_MF(bp) && !BP_NOMCP(bp)) ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); @@ -399,7 +400,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); return -EINVAL; } - /* Save new config in case command complete successully */ + /* Save new config in case command complete successfully */ new_multi_phy_config = bp->link_params.multi_phy_config; /* Get the new cfg_idx */ cfg_idx = bnx2x_get_link_cfg_idx(bp); @@ -596,29 +597,58 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return 0; } -#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) -#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) -#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE) -#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE) -#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE) +#define DUMP_ALL_PRESETS 0x1FFF +#define DUMP_MAX_PRESETS 13 -static bool bnx2x_is_reg_online(struct bnx2x *bp, - const struct reg_addr *reg_info) +static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset) { if (CHIP_IS_E1(bp)) - return IS_E1_ONLINE(reg_info->info); + return dump_num_registers[0][preset-1]; else if (CHIP_IS_E1H(bp)) - return IS_E1H_ONLINE(reg_info->info); + return dump_num_registers[1][preset-1]; else if (CHIP_IS_E2(bp)) - return IS_E2_ONLINE(reg_info->info); + return dump_num_registers[2][preset-1]; else if (CHIP_IS_E3A0(bp)) - return IS_E3_ONLINE(reg_info->info); + return dump_num_registers[3][preset-1]; else if (CHIP_IS_E3B0(bp)) - return IS_E3B0_ONLINE(reg_info->info); + return dump_num_registers[4][preset-1]; else - return false; + return 0; +} + +static int __bnx2x_get_regs_len(struct bnx2x *bp) +{ + u32 preset_idx; + int regdump_len = 0; + + /* Calculate the total preset regs length */ + for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) + regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx); + + return regdump_len; +} + +static int bnx2x_get_regs_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + + regdump_len = __bnx2x_get_regs_len(bp); + regdump_len *= 4; + regdump_len += sizeof(struct dump_header); + + return regdump_len; } +#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) +#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) +#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) +#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) +#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) + +#define 
IS_REG_IN_PRESET(presets, idx) \ + ((presets & (1 << (idx-1))) == (1 << (idx-1))) + /******* Paged registers info selectors ********/ static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) { @@ -680,38 +710,39 @@ static u32 __bnx2x_get_page_read_num(struct bnx2x *bp) return 0; } -static int __bnx2x_get_regs_len(struct bnx2x *bp) +static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, + const struct reg_addr *reg_info) { - int num_pages = __bnx2x_get_page_reg_num(bp); - int page_write_num = __bnx2x_get_page_write_num(bp); - const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp); - int page_read_num = __bnx2x_get_page_read_num(bp); - int regdump_len = 0; - int i, j, k; - - for (i = 0; i < REGS_COUNT; i++) - if (bnx2x_is_reg_online(bp, &reg_addrs[i])) - regdump_len += reg_addrs[i].size; - - for (i = 0; i < num_pages; i++) - for (j = 0; j < page_write_num; j++) - for (k = 0; k < page_read_num; k++) - if (bnx2x_is_reg_online(bp, &page_read_addr[k])) - regdump_len += page_read_addr[k].size; - - return regdump_len; + if (CHIP_IS_E1(bp)) + return IS_E1_REG(reg_info->chips); + else if (CHIP_IS_E1H(bp)) + return IS_E1H_REG(reg_info->chips); + else if (CHIP_IS_E2(bp)) + return IS_E2_REG(reg_info->chips); + else if (CHIP_IS_E3A0(bp)) + return IS_E3A0_REG(reg_info->chips); + else if (CHIP_IS_E3B0(bp)) + return IS_E3B0_REG(reg_info->chips); + else + return false; } -static int bnx2x_get_regs_len(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - int regdump_len = 0; - regdump_len = __bnx2x_get_regs_len(bp); - regdump_len *= 4; - regdump_len += sizeof(struct dump_hdr); - - return regdump_len; +static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, + const struct wreg_addr *wreg_info) +{ + if (CHIP_IS_E1(bp)) + return IS_E1_REG(wreg_info->chips); + else if (CHIP_IS_E1H(bp)) + return IS_E1H_REG(wreg_info->chips); + else if (CHIP_IS_E2(bp)) + return IS_E2_REG(wreg_info->chips); + else if (CHIP_IS_E3A0(bp)) + return IS_E3A0_REG(wreg_info->chips); + else if (CHIP_IS_E3B0(bp)) + return IS_E3B0_REG(wreg_info->chips); + else + return false; } /** @@ -725,9 +756,10 @@ static int bnx2x_get_regs_len(struct net_device *dev) * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. 
*/ -static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) +static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset) { u32 i, j, k, n; + /* addresses of the paged registers */ const u32 *page_addr = __bnx2x_get_page_addr_ar(bp); /* number of paged registers */ @@ -740,32 +772,100 @@ static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp); /* number of read addresses */ int read_num = __bnx2x_get_page_read_num(bp); + u32 addr, size; for (i = 0; i < num_pages; i++) { for (j = 0; j < write_num; j++) { REG_WR(bp, write_addr[j], page_addr[i]); - for (k = 0; k < read_num; k++) - if (bnx2x_is_reg_online(bp, &read_addr[k])) - for (n = 0; n < - read_addr[k].size; n++) - *p++ = REG_RD(bp, - read_addr[k].addr + n*4); + + for (k = 0; k < read_num; k++) { + if (IS_REG_IN_PRESET(read_addr[k].presets, + preset)) { + size = read_addr[k].size; + for (n = 0; n < size; n++) { + addr = read_addr[k].addr + n*4; + *p++ = REG_RD(bp, addr); + } + } + } } } } -static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) +static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) { - u32 i, j; + u32 i, j, addr; + const struct wreg_addr *wreg_addr_p = NULL; + + if (CHIP_IS_E1(bp)) + wreg_addr_p = &wreg_addr_e1; + else if (CHIP_IS_E1H(bp)) + wreg_addr_p = &wreg_addr_e1h; + else if (CHIP_IS_E2(bp)) + wreg_addr_p = &wreg_addr_e2; + else if (CHIP_IS_E3A0(bp)) + wreg_addr_p = &wreg_addr_e3; + else if (CHIP_IS_E3B0(bp)) + wreg_addr_p = &wreg_addr_e3b0; + + /* Read the idle_chk registers */ + for (i = 0; i < IDLE_REGS_COUNT; i++) { + if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) && + IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { + for (j = 0; j < idle_reg_addrs[i].size; j++) + *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4); + } + } /* Read the regular registers */ - for (i = 0; i < REGS_COUNT; i++) - if (bnx2x_is_reg_online(bp, &reg_addrs[i])) + for (i = 0; i < REGS_COUNT; i++) { + if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) && + IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { for (j = 0; j < reg_addrs[i].size; j++) *p++ = REG_RD(bp, reg_addrs[i].addr + j*4); + } + } + + /* Read the CAM registers */ + if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) && + IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { + for (i = 0; i < wreg_addr_p->size; i++) { + *p++ = REG_RD(bp, wreg_addr_p->addr + i*4); - /* Read "paged" registes */ - bnx2x_read_pages_regs(bp, p); + /* In case of wreg_addr register, read additional + registers from read_regs array + */ + for (j = 0; j < wreg_addr_p->read_regs_count; j++) { + addr = *(wreg_addr_p->read_regs); + *p++ = REG_RD(bp, addr + j*4); + } + } + } + + /* Paged registers are supported in E2 & E3 only */ + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { + /* Read "paged" registes */ + bnx2x_read_pages_regs(bp, p, preset); + } + + return 0; +} + +static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) +{ + u32 preset_idx; + + /* Read all registers, by reading all preset registers */ + for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { + /* Skip presets with IOR */ + if ((preset_idx == 2) || + (preset_idx == 5) || + (preset_idx == 8) || + (preset_idx == 11)) + continue; + __bnx2x_get_preset_regs(bp, p, preset_idx); + p += __bnx2x_get_preset_regs_len(bp, preset_idx); + } } static void bnx2x_get_regs(struct net_device *dev, @@ -773,9 +873,9 @@ static void bnx2x_get_regs(struct net_device *dev, { u32 *p = _p; struct bnx2x *bp = netdev_priv(dev); - struct dump_hdr dump_hdr = {0}; + struct 
dump_header dump_hdr = {0}; - regs->version = 1; + regs->version = 2; memset(p, 0, regs->len); if (!netif_running(bp->dev)) @@ -785,53 +885,173 @@ static void bnx2x_get_regs(struct net_device *dev, * cause false alarms by reading never written registers. We * will re-enable parity attentions right after the dump. */ + + /* Disable parity on path 0 */ + bnx2x_pretend_func(bp, 0); bnx2x_disable_blocks_parity(bp); - dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; - dump_hdr.dump_sign = dump_sign_all; - dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); - dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); - dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); - dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); + /* Disable parity on path 1 */ + bnx2x_pretend_func(bp, 1); + bnx2x_disable_blocks_parity(bp); - if (CHIP_IS_E1(bp)) - dump_hdr.info = RI_E1_ONLINE; - else if (CHIP_IS_E1H(bp)) - dump_hdr.info = RI_E1H_ONLINE; - else if (!CHIP_IS_E1x(bp)) - dump_hdr.info = RI_E2_ONLINE | - (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP); + /* Return to current function */ + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; + dump_hdr.preset = DUMP_ALL_PRESETS; + dump_hdr.version = BNX2X_DUMP_VERSION; + + /* dump_meta_data presents OR of CHIP and PATH. */ + if (CHIP_IS_E1(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1; + } else if (CHIP_IS_E1H(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1H; + } else if (CHIP_IS_E2(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E2 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3A0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3B0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | + (BP_PATH(bp) ? 
DUMP_PATH_1 : DUMP_PATH_0); + } - memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); - p += dump_hdr.hdr_size + 1; + memcpy(p, &dump_hdr, sizeof(struct dump_header)); + p += dump_hdr.header_size + 1; /* Actually read the registers */ __bnx2x_get_regs(bp, p); - /* Re-enable parity attentions */ + /* Re-enable parity attentions on path 0 */ + bnx2x_pretend_func(bp, 0); bnx2x_clear_blocks_parity(bp); bnx2x_enable_blocks_parity(bp); + + /* Re-enable parity attentions on path 1 */ + bnx2x_pretend_func(bp, 1); + bnx2x_clear_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); + + /* Return to current function */ + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + + regdump_len = __bnx2x_get_preset_regs_len(bp, preset); + regdump_len *= 4; + regdump_len += sizeof(struct dump_header); + + return regdump_len; +} + +static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* Use the ethtool_dump "flag" field as the dump preset index */ + bp->dump_preset_idx = val->flag; + return 0; +} + +static int bnx2x_get_dump_flag(struct net_device *dev, + struct ethtool_dump *dump) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* Calculate the requested preset idx length */ + dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); + DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n", + bp->dump_preset_idx, dump->len); + + dump->flag = ETHTOOL_GET_DUMP_DATA; + return 0; +} + +static int bnx2x_get_dump_data(struct net_device *dev, + struct ethtool_dump *dump, + void *buffer) +{ + u32 *p = buffer; + struct bnx2x *bp = netdev_priv(dev); + struct dump_header dump_hdr = {0}; + + memset(p, 0, dump->len); + + /* Disable parity attentions as long as following dump may + * cause false alarms by reading never written registers. We + * will re-enable parity attentions right after the dump. + */ + + /* Disable parity on path 0 */ + bnx2x_pretend_func(bp, 0); + bnx2x_disable_blocks_parity(bp); + + /* Disable parity on path 1 */ + bnx2x_pretend_func(bp, 1); + bnx2x_disable_blocks_parity(bp); + + /* Return to current function */ + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; + dump_hdr.preset = bp->dump_preset_idx; + dump_hdr.version = BNX2X_DUMP_VERSION; + + DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset); + + /* dump_meta_data presents OR of CHIP and PATH. */ + if (CHIP_IS_E1(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1; + } else if (CHIP_IS_E1H(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1H; + } else if (CHIP_IS_E2(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E2 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3A0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3B0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | + (BP_PATH(bp) ? 
DUMP_PATH_1 : DUMP_PATH_0); + } + + memcpy(p, &dump_hdr, sizeof(struct dump_header)); + p += dump_hdr.header_size + 1; + + /* Actually read the registers */ + __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); + + /* Re-enable parity attentions on path 0 */ + bnx2x_pretend_func(bp, 0); + bnx2x_clear_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); + + /* Re-enable parity attentions on path 1 */ + bnx2x_pretend_func(bp, 1); + bnx2x_clear_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); + + /* Return to current function */ + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + return 0; } static void bnx2x_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnx2x *bp = netdev_priv(dev); - u8 phy_fw_ver[PHY_FW_VER_LEN]; strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - phy_fw_ver[0] = '\0'; - bnx2x_get_ext_phy_fw_version(&bp->link_params, - phy_fw_ver, PHY_FW_VER_LEN); - strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version)); - snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), - "bc %d.%d.%d%s%s", - (bp->common.bc_ver & 0xff0000) >> 16, - (bp->common.bc_ver & 0xff00) >> 8, - (bp->common.bc_ver & 0xff), - ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); + bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = BNX2X_NUM_STATS; info->testinfo_len = BNX2X_NUM_TESTS(bp); @@ -861,13 +1081,13 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct bnx2x *bp = netdev_priv(dev); if (wol->wolopts & ~WAKE_MAGIC) { - DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n"); + DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n"); return -EINVAL; } if (wol->wolopts & WAKE_MAGIC) { if (bp->flags & NO_WOL_FLAG) { - DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n"); + DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n"); return -EINVAL; } bp->wol = 1; @@ -890,7 +1110,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level) if (capable(CAP_NET_ADMIN)) { /* dump MCP trace */ - if (level & BNX2X_MSG_MCP) + if (IS_PF(bp) && (level & BNX2X_MSG_MCP)) bnx2x_fw_dump_lvl(bp, KERN_INFO); bp->msg_enable = level; } @@ -940,7 +1160,7 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) * Pf B takes the lock and proceeds to perform it's own access. * pf A unlocks the per port lock, while pf B is still working (!). 
* mcp takes the per port lock and corrupts pf B's access (and/or has it's own - * acess corrupted by pf B).* + * access corrupted by pf B) */ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) { @@ -1070,7 +1290,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); /* we read nvram data in cpu order * but ethtool sees it as an array of bytes - * converting to big-endian will do the work */ + * converting to big-endian will do the work + */ *ret_val = cpu_to_be32(val); rc = 0; break; @@ -1297,7 +1518,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, val |= (*data_buf << BYTE_OFFSET(offset)); /* nvram data is returned as an array of bytes - * convert it back to cpu order */ + * convert it back to cpu order + */ val = be32_to_cpu(val); rc = bnx2x_nvram_write_dword(bp, align_offset, val, @@ -1509,6 +1731,10 @@ static int bnx2x_set_ringparam(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); + DP(BNX2X_MSG_ETHTOOL, + "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", + ering->rx_pending, ering->tx_pending); + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { DP(BNX2X_MSG_ETHTOOL, "Handling parity error recovery. Try again later\n"); @@ -1747,7 +1973,6 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } - enum { BNX2X_CHIP_E1_OFST = 0, BNX2X_CHIP_E1H_OFST, @@ -1875,7 +2100,8 @@ static int bnx2x_test_registers(struct bnx2x *bp) hw = BNX2X_CHIP_MASK_E3; /* Repeat the test twice: - First by writing 0x00000000, second by writing 0xffffffff */ + * First by writing 0x00000000, second by writing 0xffffffff + */ for (idx = 0; idx < 2; idx++) { switch (idx) { @@ -2388,8 +2614,8 @@ static void bnx2x_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { struct bnx2x *bp = netdev_priv(dev); - u8 is_serdes; - int rc; + u8 is_serdes, link_up; + int rc, cnt = 0; if (bp->recovery_state != BNX2X_RECOVERY_DONE) { netdev_err(bp->dev, @@ -2397,6 +2623,7 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_FAILED; return; } + DP(BNX2X_MSG_ETHTOOL, "Self-test command parameters: offline = %d, external_lb = %d\n", (etest->flags & ETH_TEST_FL_OFFLINE), @@ -2411,20 +2638,17 @@ static void bnx2x_self_test(struct net_device *dev, } is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; - + link_up = bp->link_vars.link_up; /* offline tests are not supported in MF mode */ if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) { int port = BP_PORT(bp); u32 val; - u8 link_up; /* save current value of input enable for TX port IF */ val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); /* disable input for TX port IF */ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); - link_up = bp->link_vars.link_up; - bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); rc = bnx2x_nic_load(bp, LOAD_DIAG); if (rc) { @@ -2486,17 +2710,19 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_FAILED; } - if (bnx2x_link_test(bp, is_serdes) != 0) { + if (link_up) { + cnt = 100; + while (bnx2x_link_test(bp, is_serdes) && --cnt) + msleep(20); + } + + if (!cnt) { if (!IS_MF(bp)) buf[6] = 1; else buf[2] = 1; etest->flags |= ETH_TEST_FL_FAILED; } - -#ifdef BNX2X_EXTRA_DEBUG - bnx2x_panic_dump(bp); -#endif } #define IS_PORT_STAT(i) \ @@ -2753,15 +2979,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "Command parameters not 
supported\n"); return -EINVAL; - } else { - return 0; } + return 0; case UDP_V4_FLOW: case UDP_V6_FLOW: /* For UDP either 2-tupple hash or 4-tupple hash is supported */ if (info->data == (RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) + RXH_L4_B_0_1 | RXH_L4_B_2_3)) udp_rss_requested = 1; else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) udp_rss_requested = 0; @@ -2781,9 +3006,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? "enabled" : "disabled"); return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); - } else { - return 0; } + return 0; + case IPV4_FLOW: case IPV6_FLOW: /* For IP only 2-tupple hash is supported */ @@ -2791,9 +3016,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; - } else { - return 0; } + return 0; + case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -2809,9 +3034,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; - } else { - return 0; } + return 0; + default: return -EINVAL; } @@ -2964,6 +3189,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_drvinfo = bnx2x_get_drvinfo, .get_regs_len = bnx2x_get_regs_len, .get_regs = bnx2x_get_regs, + .get_dump_flag = bnx2x_get_dump_flag, + .get_dump_data = bnx2x_get_dump_data, + .set_dump = bnx2x_set_dump, .get_wol = bnx2x_get_wol, .set_wol = bnx2x_set_wol, .get_msglevel = bnx2x_get_msglevel, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 60a83ad10370..e5f808377c91 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -1,6 +1,6 @@ /* bnx2x_fw_defs.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -305,12 +305,10 @@ #define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */ #define MAX_VLAN_CREDIT_E2 272 /* Per Path */ - /* Maximal aggregation queues supported */ #define ETH_MAX_AGGREGATION_QUEUES_E1 32 #define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64 - #define ETH_NUM_OF_MCAST_BINS 256 #define ETH_NUM_OF_MCAST_ENGINES_E2 72 @@ -353,7 +351,6 @@ /* max number of slow path commands per port */ #define MAX_RAMRODS_PER_PORT 8 - /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ #define TIMERS_TICK_SIZE_CHIP (1e-3) @@ -380,7 +377,6 @@ that is not mapped to priority*/ #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF - #define C_ERES_PER_PAGE \ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) @@ -391,8 +387,6 @@ #define INVALID_VNIC_ID 0xFF - #define UNDEF_IRO 0x80000000 - #endif /* BNX2X_FW_DEFS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h index 4bed52ba300d..f572ae164fce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h @@ -1,6 +1,6 @@ /* bnx2x_fw_file_hdr.h: FW binary file header structure. 
* - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 3369a50ac6b4..037860ecc343 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1,6 +1,6 @@ /* bnx2x_hsi.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -899,6 +899,10 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800 + #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 #define PORT_FEATURE_EN_SIZE_SHIFT 24 #define PORT_FEATURE_WOL_ENABLED 0x01000000 @@ -3374,6 +3378,10 @@ struct regpair { __le32 hi; }; +struct regpair_native { + u32 lo; + u32 hi; +}; /* * Classify rule opcodes in E2/E3 @@ -4400,13 +4408,13 @@ struct tstorm_eth_function_common_config { * MAC filtering configuration parameters per port in Tstorm */ struct tstorm_eth_mac_filter_config { - __le32 ucast_drop_all; - __le32 ucast_accept_all; - __le32 mcast_drop_all; - __le32 mcast_accept_all; - __le32 bcast_accept_all; - __le32 vlan_filter[2]; - __le32 unmatched_unicast; + u32 ucast_drop_all; + u32 ucast_accept_all; + u32 mcast_drop_all; + u32 mcast_accept_all; + u32 bcast_accept_all; + u32 vlan_filter[2]; + u32 unmatched_unicast; }; @@ -4898,7 +4906,7 @@ union event_data { * per PF event ring data */ struct event_ring_data { - struct regpair base_addr; + struct regpair_native base_addr; #if defined(__BIG_ENDIAN) u8 index_id; u8 sb_id; @@ -5131,7 +5139,7 @@ struct pci_entity { * The fast-path status block meta-data, common to all chips */ struct hc_sb_data { - struct regpair host_sb_addr; + struct regpair_native host_sb_addr; struct hc_status_block_sm state_machine[HC_SB_MAX_SM]; struct pci_entity p_func; #if defined(__BIG_ENDIAN) @@ -5145,7 +5153,7 @@ struct hc_sb_data { u8 state; u8 rsrv0; #endif - struct regpair rsrv1[2]; + struct regpair_native rsrv1[2]; }; @@ -5163,7 +5171,7 @@ enum hc_segment { * The fast-path status block meta-data */ struct hc_sp_status_block_data { - struct regpair host_sb_addr; + struct regpair_native host_sb_addr; #if defined(__BIG_ENDIAN) u8 rsrv1; u8 state; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index c8f10f0e8a0d..76df015f486a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -1,7 +1,7 @@ /* bnx2x_init.h: Broadcom Everest network driver. * Structures and macroes needed during the initialization. 
* - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index d755acfe7a40..8ab0dd900960 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -2,7 +2,7 @@ * Static functions needed during the initialization. * This file is "included" in bnx2x_main.c. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -218,7 +218,7 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, /* gunzip_outlen is in dwords */ len = GUNZIP_OUTLEN(bp); for (i = 0; i < len; i++) - ((u32 *)GUNZIP_BUF(bp))[i] = + ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32) cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]); bnx2x_write_big_buf_wb(bp, addr, len); @@ -232,7 +232,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) u16 op_end = INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)]; - union init_op *op; + const union init_op *op; u32 op_idx, op_type, addr, len; const u32 *data, *data_base; @@ -244,7 +244,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) for (op_idx = op_start; op_idx < op_end; op_idx++) { - op = (union init_op *)&(INIT_OPS(bp)[op_idx]); + op = (const union init_op *)&(INIT_OPS(bp)[op_idx]); /* Get generic data */ op_type = op->raw.op; addr = op->raw.offset; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 09096b43a6e9..c6da77fa9d07 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -1,4 +1,4 @@ -/* Copyright 2008-2012 Broadcom Corporation +/* Copyright 2008-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -3659,7 +3659,7 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6)); - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) + for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); @@ -3713,7 +3713,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, }; DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); /* Set to default registers that may be overriden by 10G force */ - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) + for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); @@ -3854,7 +3854,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2} }; - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) + for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); @@ -4242,7 +4242,7 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 
MDIO_WC_REG_RX66_CONTROL, (3<<13)); - for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++) + for (i = 0; i < ARRAY_SIZE(wc_regs); i++) bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, wc_regs[i].val); @@ -4748,6 +4748,12 @@ void bnx2x_link_status_update(struct link_params *params, vars->link_status = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, port_mb[port].link_status)); + + /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ + if (bp->link_params.loopback_mode != LOOPBACK_NONE && + bp->link_params.loopback_mode != LOOPBACK_EXT) + vars->link_status |= LINK_STATUS_LINK_UP; + if (bnx2x_eee_has_cap(params)) vars->eee_status = REG_RD(bp, params->shmem2_base + offsetof(struct shmem2_region, @@ -9520,7 +9526,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, } else { /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); + for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); @@ -9592,7 +9598,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val); - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) + for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); @@ -13395,7 +13401,7 @@ static void bnx2x_disable_kr2(struct link_params *params, }; DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) + for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index ee6e7ec85457..d25c7d79787a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -1,4 +1,4 @@ -/* Copyright 2008-2012 Broadcom Corporation +/* Copyright 2008-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5523da3afcdc..c4daee1b7286 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1,6 +1,6 @@ /* bnx2x_main.c: Broadcom Everest network driver. 
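/*
 * Illustrative sketch only -- not part of the patch. The reg_set loops
 * above replace open-coded sizeof(array)/sizeof(element type) with
 * ARRAY_SIZE(); this standalone equivalent shows why the macro form is
 * preferred: dividing by sizeof the first element cannot go stale if the
 * element type of the table ever changes.
 */
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct reg_set_sketch { unsigned char devad; unsigned short reg, val; };

int main(void)
{
	static const struct reg_set_sketch reg_set[] = {
		{ 0x1, 0x0010, 0x2 },
		{ 0x3, 0x0020, 0x4 },
		{ 0x3, 0x0021, 0x0 },
	};
	size_t i;

	/* sizeof(reg_set)/sizeof(struct reg_set_sketch) == ARRAY_SIZE(reg_set) */
	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		printf("devad %u reg 0x%04x val 0x%04x\n",
		       (unsigned)reg_set[i].devad,
		       (unsigned)reg_set[i].reg,
		       (unsigned)reg_set[i].val);
	return 0;
}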
* - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -59,6 +59,7 @@ #include "bnx2x_init.h" #include "bnx2x_init_ops.h" #include "bnx2x_cmn.h" +#include "bnx2x_vfpf.h" #include "bnx2x_dcb.h" #include "bnx2x_sp.h" @@ -144,39 +145,49 @@ enum bnx2x_board_type { BCM57711E, BCM57712, BCM57712_MF, + BCM57712_VF, BCM57800, BCM57800_MF, + BCM57800_VF, BCM57810, BCM57810_MF, - BCM57840_O, + BCM57810_VF, BCM57840_4_10, BCM57840_2_20, - BCM57840_MFO, BCM57840_MF, + BCM57840_VF, BCM57811, - BCM57811_MF + BCM57811_MF, + BCM57840_O, + BCM57840_MFO, + BCM57811_VF }; /* indexed by board_type, above */ static struct { char *name; } board_info[] = { - { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, - { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, - { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, - { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"}, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"}, - { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"}, - { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"}, + [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, + [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, + [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, + [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, + [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, + [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" }, + [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, + [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, + [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" }, + [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, + [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, + [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" }, + [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, + [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" }, + [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }, + [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" }, + [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" }, + [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, + [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi 
Function" }, + [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" } }; #ifndef PCI_DEVICE_ID_NX2_57710 @@ -194,12 +205,18 @@ static struct { #ifndef PCI_DEVICE_ID_NX2_57712_MF #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57712_VF +#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF +#endif #ifndef PCI_DEVICE_ID_NX2_57800 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 #endif #ifndef PCI_DEVICE_ID_NX2_57800_MF #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57800_VF +#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF +#endif #ifndef PCI_DEVICE_ID_NX2_57810 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 #endif @@ -209,6 +226,9 @@ static struct { #ifndef PCI_DEVICE_ID_NX2_57840_O #define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE #endif +#ifndef PCI_DEVICE_ID_NX2_57810_VF +#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF +#endif #ifndef PCI_DEVICE_ID_NX2_57840_4_10 #define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10 #endif @@ -221,29 +241,41 @@ static struct { #ifndef PCI_DEVICE_ID_NX2_57840_MF #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57840_VF +#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF +#endif #ifndef PCI_DEVICE_ID_NX2_57811 #define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811 #endif #ifndef PCI_DEVICE_ID_NX2_57811_MF #define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57811_VF +#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF +#endif + static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF }, { 0 } }; @@ -346,6 +378,65 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" #define DMAE_DP_DST_NONE "dst_addr [none]" +void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) +{ + u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + + switch (dmae->opcode & DMAE_COMMAND_DST) { + case DMAE_CMD_DST_PCI: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr 
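/*
 * Illustrative sketch only -- not part of the patch. board_info[] above is
 * rewritten with designated initializers ([BCM57710] = { ... }) so that
 * each name string stays tied to its enum value even as the new *_VF
 * entries are inserted into the middle of the enum. A minimal standalone
 * version of that idiom:
 */
#include <stdio.h>

enum board_type_sketch { CARD_A, CARD_A_MF, CARD_A_VF, CARD_B, CARD_MAX };

static const char *const board_name[CARD_MAX] = {
	[CARD_A]    = "Card A",
	[CARD_A_MF] = "Card A Multi Function",
	[CARD_A_VF] = "Card A Virtual Function",   /* new entry, no shifting */
	[CARD_B]    = "Card B",
};

int main(void)
{
	enum board_type_sketch t;

	for (t = 0; t < CARD_MAX; t++)
		printf("%d: %s\n", (int)t, board_name[t]);
	return 0;
}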
[%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + case DMAE_CMD_DST_GRC: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + default: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" + "comp_addr [%x:%08x] comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src_addr [%08x] len [%d * 4] dst_addr [none]\n" + "comp_addr [%x:%08x] comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + } +} /* copy command into DMAE command memory and set DMAE command go */ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) @@ -396,7 +487,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, return opcode; } -static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, +void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, u8 src_type, u8 dst_type) { @@ -412,9 +503,8 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, dmae->comp_val = DMAE_COMP_VAL; } -/* issue a dmae command over the init-channel and wailt for completion */ -static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, - struct dmae_command *dmae) +/* issue a dmae command over the init-channel and wait for completion */ +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) { u32 *wb_comp = bnx2x_sp(bp, wb_comp); int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; @@ -692,12 +782,16 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); printk("%s", lvl); + + /* dump buffer after the mark */ for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, offset + 4*word)); data[8] = 0x0; pr_cont("%s", (char *)data); } + + /* dump buffer before the mark */ for (offset = addr + 4; offset <= mark; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, offset + 4*word)); @@ -712,7 +806,71 @@ static void bnx2x_fw_dump(struct bnx2x *bp) bnx2x_fw_dump_lvl(bp, KERN_ERR); } -void bnx2x_panic_dump(struct bnx2x *bp) +static void bnx2x_hc_int_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + u32 val = REG_RD(bp, addr); + + /* in E1 we must use only PCI configuration space to disable + * MSI/MSIX capablility + * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + */ + if (CHIP_IS_E1(bp)) { + /* Since IGU_PF_CONF_MSI_MSIX_EN still always on + * Use mask register to prevent from HC sending interrupts + * after we exit the function + */ + REG_WR(bp, HC_REG_INT_MASK + port*4, 0); + + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + DP(NETIF_MSG_IFDOWN, + "write %x to HC %d (addr 0x%x)\n", + val, port, addr); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, addr, val); + if (REG_RD(bp, addr) != val) + BNX2X_ERR("BUG! proper val not read from IGU!\n"); +} + +static void bnx2x_igu_int_disable(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + + val &= ~(IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN); + + DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) + BNX2X_ERR("BUG! proper val not read from IGU!\n"); +} + +static void bnx2x_int_disable(struct bnx2x *bp) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_int_disable(bp); + else + bnx2x_igu_int_disable(bp); +} + +void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) { int i; u16 j; @@ -722,6 +880,8 @@ void bnx2x_panic_dump(struct bnx2x *bp) u16 start = 0, end = 0; u8 cos; #endif + if (disable_int) + bnx2x_int_disable(bp); bp->stats_state = STATS_STATE_DISABLED; bp->eth_stats.unrecoverable_error++; @@ -867,6 +1027,17 @@ void bnx2x_panic_dump(struct bnx2x *bp) } #ifdef BNX2X_STOP_ON_ERROR + + /* event queue */ + for (i = 0; i < NUM_EQ_DESC; i++) { + u32 *data = (u32 *)&bp->eq_ring[i].message.data; + + BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", + i, bp->eq_ring[i].message.opcode, + bp->eq_ring[i].message.error); + BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]); + } + /* Rings */ /* Rx */ for_each_valid_rx_queue(bp, i) { @@ -1038,8 +1209,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, return val; } -static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, - char *msg, u32 poll_cnt) +int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt) { u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); if (val != 0) { @@ -1049,7 +1220,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, return 0; } -static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) +/* Common routines with VF FLR cleanup */ +u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) { /* adjust polling timeout */ if (CHIP_REV_IS_EMUL(bp)) @@ -1061,7 +1233,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) return FLR_POLL_CNT; } -static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) +void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) { struct pbf_pN_cmd_regs cmd_regs[] = { {0, (CHIP_IS_E3B0(bp)) ? 
@@ -1136,10 +1308,9 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) -static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, - u32 poll_cnt) +int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) { - struct sdm_op_gen op_gen = {0}; + u32 op_gen_command = 0; u32 comp_addr = BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); @@ -1150,19 +1321,20 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, return 1; } - op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); - op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); - op_gen.command |= OP_GEN_AGG_VECT(clnup_func); - op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen_command |= OP_GEN_AGG_VECT(clnup_func); + op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); - REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); + REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command); if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { BNX2X_ERR("FW final cleanup did not succeed\n"); DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", (REG_RD(bp, comp_addr))); - ret = 1; + bnx2x_panic(); + return 1; } /* Zero completion for nxt FLR */ REG_WR(bp, comp_addr, 0); @@ -1170,7 +1342,7 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, return ret; } -static u8 bnx2x_is_pcie_pending(struct pci_dev *dev) +u8 bnx2x_is_pcie_pending(struct pci_dev *dev) { u16 status; @@ -1382,26 +1554,31 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) if (msix) { val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_MSI_MSIX_EN | + val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); if (single_msix) val |= IGU_PF_CONF_SINGLE_ISR_EN; } else if (msi) { val &= ~IGU_PF_CONF_INT_LINE_EN; - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_MSI_MSIX_EN | + val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } else { val &= ~IGU_PF_CONF_MSI_MSIX_EN; - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_INT_LINE_EN | + val |= (IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } + /* Clean previous status - need to configure igu prior to ack*/ + if ((!msix) || single_msix) { + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + bnx2x_ack_int(bp); + } + + val |= IGU_PF_CONF_FUNC_EN; + DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); @@ -1436,71 +1613,6 @@ void bnx2x_int_enable(struct bnx2x *bp) bnx2x_igu_int_enable(bp); } -static void bnx2x_hc_int_disable(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 addr = port ? 
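/*
 * Illustrative sketch only -- not part of the patch. The final-cleanup
 * hunk above drops struct sdm_op_gen and composes the command directly
 * into a u32 by OR-ing shifted fields (OP_GEN_PARAM/TYPE/AGG_VECT). The
 * field layout below is invented purely to show the shift-and-mask
 * packing pattern in standalone form.
 */
#include <stdint.h>
#include <stdio.h>

#define OPGEN_PARAM_SHIFT     0
#define OPGEN_PARAM_MASK      0x000000ffu
#define OPGEN_TYPE_SHIFT      8
#define OPGEN_TYPE_MASK       0x0000ff00u
#define OPGEN_AGG_VECT_SHIFT  16
#define OPGEN_AGG_VECT_MASK   0x00ff0000u
#define OPGEN_VECT_VALID_BIT  (1u << 24)

#define OPGEN_PARAM(x)    (((uint32_t)(x) << OPGEN_PARAM_SHIFT) & OPGEN_PARAM_MASK)
#define OPGEN_TYPE(x)     (((uint32_t)(x) << OPGEN_TYPE_SHIFT) & OPGEN_TYPE_MASK)
#define OPGEN_AGG_VECT(x) (((uint32_t)(x) << OPGEN_AGG_VECT_SHIFT) & OPGEN_AGG_VECT_MASK)

int main(void)
{
	uint32_t op_gen_command = 0;

	op_gen_command |= OPGEN_PARAM(0x2a);     /* cleanup index      */
	op_gen_command |= OPGEN_TYPE(0x01);      /* completion type    */
	op_gen_command |= OPGEN_AGG_VECT(5);     /* function/vector id */
	op_gen_command |= OPGEN_VECT_VALID_BIT;  /* mark vector valid  */

	printf("command word: 0x%08x\n", op_gen_command);
	return 0;
}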
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; - u32 val = REG_RD(bp, addr); - - /* - * in E1 we must use only PCI configuration space to disable - * MSI/MSIX capablility - * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block - */ - if (CHIP_IS_E1(bp)) { - /* Since IGU_PF_CONF_MSI_MSIX_EN still always on - * Use mask register to prevent from HC sending interrupts - * after we exit the function - */ - REG_WR(bp, HC_REG_INT_MASK + port*4, 0); - - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - } else - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - - DP(NETIF_MSG_IFDOWN, - "write %x to HC %d (addr 0x%x)\n", - val, port, addr); - - /* flush all outstanding writes */ - mmiowb(); - - REG_WR(bp, addr, val); - if (REG_RD(bp, addr) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); -} - -static void bnx2x_igu_int_disable(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); - - val &= ~(IGU_PF_CONF_MSI_MSIX_EN | - IGU_PF_CONF_INT_LINE_EN | - IGU_PF_CONF_ATTN_BIT_EN); - - DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); - - /* flush all outstanding writes */ - mmiowb(); - - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); - if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); -} - -static void bnx2x_int_disable(struct bnx2x *bp) -{ - if (bp->common.int_block == INT_BLOCK_HC) - bnx2x_hc_int_disable(bp); - else - bnx2x_igu_int_disable(bp); -} - void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) { int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; @@ -1586,11 +1698,11 @@ static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) } /** - * bnx2x_trylock_leader_lock- try to aquire a leader lock. + * bnx2x_trylock_leader_lock- try to acquire a leader lock. * * @bp: driver handle * - * Tries to aquire a leader lock for current engine. + * Tries to acquire a leader lock for current engine. */ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) { @@ -1599,6 +1711,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); +/* schedule the sp task and mark that interrupt occurred (runs from ISR) */ +static int bnx2x_schedule_sp_task(struct bnx2x *bp) +{ + /* Set the interrupt occurred bit for the sp-task to recognize it + * must ack the interrupt and transition according to the IGU + * state machine. + */ + atomic_set(&bp->interrupt_occurred, 1); + + /* The sp_task must execute only after this bit + * is set, otherwise we will get out of sync and miss all + * further interrupts. Hence, the barrier. + */ + smp_wmb(); + + /* schedule sp_task to workqueue */ + return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); +} void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) { @@ -1613,6 +1743,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.ramrod_type); + /* If cid is within VF range, replace the slowpath object with the + * one corresponding to this VF + */ + if (cid >= BNX2X_FIRST_VF_CID && + cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS) + bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj); + switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): DP(BNX2X_MSG_SP, "got UPDATE ramrod. 
CID %d\n", cid); @@ -1664,6 +1801,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) #else return; #endif + /* SRIOV: reschedule any 'in_progress' operations */ + bnx2x_iov_sp_event(bp, cid, true); smp_mb__before_atomic_inc(); atomic_inc(&bp->cq_spq_left); @@ -1680,7 +1819,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) * mark pending ACK to MCP bit. * prevent case that both bits are cleared. * At the end of load/unload driver checks that - * sp_state is cleaerd, and this order prevents + * sp_state is cleared, and this order prevents * races */ smp_mb__before_clear_bit(); @@ -1689,22 +1828,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); smp_mb__after_clear_bit(); - /* schedule workqueue to send ack to MCP */ - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + /* schedule the sp task as mcp ack is required */ + bnx2x_schedule_sp_task(bp); } return; } -void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod) -{ - u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset; - - bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod, - start); -} - irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) { struct bnx2x *bp = netdev_priv(dev_instance); @@ -1745,21 +1875,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) if (status & (mask | 0x1)) { struct cnic_ops *c_ops = NULL; - if (likely(bp->state == BNX2X_STATE_OPEN)) { - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - c_ops->cnic_handler(bp->cnic_data, - NULL); - rcu_read_unlock(); - } + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops && (bp->cnic_eth_dev.drv_state & + CNIC_DRV_STATE_HANDLES_IRQ)) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); status &= ~mask; } } if (unlikely(status & 0x1)) { - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + + /* schedule sp task to perform default status block work, ack + * attentions and enable interrupts. 
+ */ + bnx2x_schedule_sp_task(bp); status &= ~0x1; if (!status) @@ -2459,23 +2591,55 @@ void bnx2x__link_status_update(struct bnx2x *bp) return; /* read updated dcb configuration */ - bnx2x_dcbx_pmf_update(bp); - - bnx2x_link_status_update(&bp->link_params, &bp->link_vars); + if (IS_PF(bp)) { + bnx2x_dcbx_pmf_update(bp); + bnx2x_link_status_update(&bp->link_params, &bp->link_vars); + if (bp->link_vars.link_up) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + else + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + /* indicate link status */ + bnx2x_link_report(bp); - if (bp->link_vars.link_up) + } else { /* VF */ + bp->port.supported[0] |= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + bp->port.advertising[0] = bp->port.supported[0]; + + bp->link_params.bp = bp; + bp->link_params.port = BP_PORT(bp); + bp->link_params.req_duplex[0] = DUPLEX_FULL; + bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; + bp->link_params.req_line_speed[0] = SPEED_10000; + bp->link_params.speed_cap_mask[0] = 0x7f0000; + bp->link_params.switch_cfg = SWITCH_CFG_10G; + bp->link_vars.mac_type = MAC_TYPE_BMAC; + bp->link_vars.line_speed = SPEED_10000; + bp->link_vars.link_status = + (LINK_STATUS_LINK_UP | + LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); + bp->link_vars.link_up = 1; + bp->link_vars.duplex = DUPLEX_FULL; + bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; + __bnx2x_link_report(bp); bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - else - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* indicate link status */ - bnx2x_link_report(bp); + } } static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, u16 vlan_val, u8 allowed_prio) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; struct bnx2x_func_afex_update_params *f_update_params = &func_params.params.afex_update; @@ -2500,7 +2664,7 @@ static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, u16 vif_index, u8 func_bit_map) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; struct bnx2x_func_afex_viflists_params *update_params = &func_params.params.afex_viflists; int rc; @@ -2516,7 +2680,7 @@ static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, /* set parameters according to cmd_type */ update_params->afex_vif_list_command = cmd_type; - update_params->vif_list_index = cpu_to_le16(vif_index); + update_params->vif_list_index = vif_index; update_params->func_bit_map = (cmd_type == VIF_LIST_RULE_GET) ? 
0 : func_bit_map; update_params->func_to_clear = 0; @@ -2800,6 +2964,10 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); +#ifdef BNX2X_STOP_ON_ERROR + __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); +#endif + return flags; } @@ -2875,15 +3043,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, pause->sge_th_hi + FW_PREFETCH_CNT > MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); - tpa_agg_size = min_t(u32, - (min_t(u32, 8, MAX_SKB_FRAGS) * - SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); + tpa_agg_size = TPA_AGG_SIZE; max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; max_sge = ((max_sge + PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; - sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE, - 0xffff); + sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff); } /* pause - not for e1 */ @@ -2928,7 +3093,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, /* Maximum number or simultaneous TPA aggregation for this Queue. * - * For PF Clients it should be the maximum avaliable number. + * For PF Clients it should be the maximum available number. * VF driver(s) may want to define it to a smaller value. */ rxq_init->max_tpa_queues = MAX_AGG_QS(bp); @@ -3022,7 +3187,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) if (bp->port.pmf) storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - /* init Event Queue */ + /* init Event Queue - PCI bus guarantees correct endianity*/ eq_data.base_addr.hi = U64_HI(bp->eq_mapping); eq_data.base_addr.lo = U64_LO(bp->eq_mapping); eq_data.producer = bp->eq_prod; @@ -3112,65 +3277,75 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) struct fcoe_statistics_params *fw_fcoe_stat = &bp->fw_stats_data->fcoe; - ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, + fcoe_stat->rx_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_ucast_pkts); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_bcast_pkts); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - 
fcoe_q_tstorm_stats->rcv_mcast_pkts); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_mcast_pkts); - ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, + fcoe_stat->tx_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->ucast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->bcast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->mcast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->ucast_pkts_sent); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->bcast_pkts_sent); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->mcast_pkts_sent); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); } /* ask L5 driver to add data to the struct */ @@ -3641,7 +3816,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp) "Please contact OEM Support for assistance\n"); /* - * Scheudle device reset (unload) + * Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. 
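/*
 * Illustrative sketch only -- not part of the patch. The FCoE statistics
 * code above moves from ADD_64() to ADD_64_LE(), i.e. the firmware halves
 * are treated as little-endian before being folded into a 64-bit counter
 * kept as separate hi/lo u32 fields. A standalone version of that
 * add-with-carry, with the byte swap reducing to a no-op on a
 * little-endian host:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_to_cpu_sketch(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

/* add (inc_hi:inc_lo) into the split counter (*hi:*lo), propagating carry */
static void add_64_sketch(uint32_t *hi, uint32_t inc_hi,
			  uint32_t *lo, uint32_t inc_lo)
{
	uint32_t old_lo = *lo;

	*lo += inc_lo;
	*hi += inc_hi + (*lo < old_lo);   /* carry out of the low half */
}

int main(void)
{
	uint32_t rx_bytes_hi = 0, rx_bytes_lo = 0xfffffff0u;
	uint32_t fw_lo = le32_to_cpu_sketch(0x40);

	add_64_sketch(&rx_bytes_hi, 0, &rx_bytes_lo, fw_lo);
	printf("rx_bytes = 0x%08x%08x\n", rx_bytes_hi, rx_bytes_lo);
	return 0;
}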
*/ @@ -3791,6 +3966,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) if (val & DRV_STATUS_DRV_INFO_REQ) bnx2x_handle_drv_info_req(bp); + + if (val & DRV_STATUS_VF_DISABLED) + bnx2x_vf_handle_flr_event(bp); + if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); @@ -4587,8 +4766,8 @@ static void bnx2x_attn_int(struct bnx2x *bp) void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, u16 index, u8 op, u8 update) { - u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; - + u32 igu_addr = bp->igu_base_addr; + igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, igu_addr); } @@ -4616,7 +4795,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", cid); - bnx2x_panic_dump(bp); + bnx2x_panic_dump(bp, false); } bnx2x_cnic_cfc_comp(bp, cid, err); return 0; @@ -4658,7 +4837,8 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, /* Always push next commands out, don't wait here */ __set_bit(RAMROD_CONT, &ramrod_flags); - switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) + >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) @@ -4735,7 +4915,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) struct bnx2x_queue_update_params *q_update_params = &queue_params.params.update; - /* Send Q update command with afex vlan removal values for all Qs */ + /* Send Q update command with afex vlan removal values for all Qs */ queue_params.cmd = BNX2X_Q_CMD_UPDATE; /* set silent vlan removal values according to vlan mode */ @@ -4809,7 +4989,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) u8 echo; u32 cid; u8 opcode; - int spqe_cnt = 0; + int rc, spqe_cnt = 0; struct bnx2x_queue_sp_obj *q_obj; struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; @@ -4837,15 +5017,27 @@ static void bnx2x_eq_int(struct bnx2x *bp) for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { - elem = &bp->eq_ring[EQ_DESC(sw_cons)]; - cid = SW_CID(elem->message.data.cfc_del_event.cid); - opcode = elem->message.opcode; + rc = bnx2x_iov_eq_sp_event(bp, elem); + if (!rc) { + DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", + rc); + goto next_spqe; + } + /* elem CID originates from FW; actually LE */ + cid = SW_CID((__force __le32) + elem->message.data.cfc_del_event.cid); + opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { + case EVENT_RING_OPCODE_VF_PF_CHANNEL: + DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); + bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); + continue; + case EVENT_RING_OPCODE_STAT_QUERY: DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, "got statistics comp event %d\n", @@ -5011,50 +5203,65 @@ next_spqe: static void bnx2x_sp_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); - u16 status; - status = bnx2x_update_dsb_idx(bp); -/* if (status == 0) */ -/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ + DP(BNX2X_MSG_SP, "sp task invoked\n"); - DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status); + /* make sure the atomic interupt_occurred has been written */ + smp_rmb(); + if (atomic_read(&bp->interrupt_occurred)) { - /* HW attentions */ - 
if (status & BNX2X_DEF_SB_ATT_IDX) { - bnx2x_attn_int(bp); - status &= ~BNX2X_DEF_SB_ATT_IDX; - } + /* what work needs to be performed? */ + u16 status = bnx2x_update_dsb_idx(bp); - /* SP events: STAT_QUERY and others */ - if (status & BNX2X_DEF_SB_IDX) { - struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + DP(BNX2X_MSG_SP, "status %x\n", status); + DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); + atomic_set(&bp->interrupt_occurred, 0); + + /* HW attentions */ + if (status & BNX2X_DEF_SB_ATT_IDX) { + bnx2x_attn_int(bp); + status &= ~BNX2X_DEF_SB_ATT_IDX; + } + + /* SP events: STAT_QUERY and others */ + if (status & BNX2X_DEF_SB_IDX) { + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); if (FCOE_INIT(bp) && - (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - /* - * Prevent local bottom-halves from running as - * we are going to change the local NAPI list. - */ - local_bh_disable(); - napi_schedule(&bnx2x_fcoe(bp, napi)); - local_bh_enable(); + (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + /* Prevent local bottom-halves from running as + * we are going to change the local NAPI list. + */ + local_bh_disable(); + napi_schedule(&bnx2x_fcoe(bp, napi)); + local_bh_enable(); + } + + /* Handle EQ completions */ + bnx2x_eq_int(bp); + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, + le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); + + status &= ~BNX2X_DEF_SB_IDX; } - /* Handle EQ completions */ - bnx2x_eq_int(bp); + /* if status is non zero then perhaps something went wrong */ + if (unlikely(status)) + DP(BNX2X_MSG_SP, + "got an unknown interrupt! (status 0x%x)\n", status); - bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, - le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); + /* ack status block only if something was actually handled */ + bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, + le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); - status &= ~BNX2X_DEF_SB_IDX; } - if (unlikely(status)) - DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n", - status); - - bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, - le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); + /* must be called after the EQ processing (since eq leads to sriov + * ramrod completion flows). + * This flow may have been scheduled by the arrival of a ramrod + * completion, or by the sriov code rescheduling itself. + */ + bnx2x_iov_sp_task(bp); /* afex - poll to check if VIFSET_ACK should be sent to MFW */ if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, @@ -5087,7 +5294,10 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) rcu_read_unlock(); } - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + /* schedule sp task to perform default status block work, ack + * attentions and enable interrupts. 
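/*
 * Illustrative sketch only -- not part of the patch. It mirrors the
 * pairing visible above: bnx2x_schedule_sp_task() sets interrupt_occurred
 * and issues smp_wmb() before queuing the work, and bnx2x_sp_task() issues
 * smp_rmb() before reading the flag. This standalone C11 analogue expresses
 * the same publish/consume ordering with release/acquire atomics instead of
 * the kernel primitives.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int interrupt_occurred;
static int status_block_index;            /* stands in for DSB state   */

static void isr_side(void)
{
	status_block_index = 42;                          /* data first  */
	atomic_store_explicit(&interrupt_occurred, 1,
			      memory_order_release);      /* then flag   */
	/* ...the work item would be queued here... */
}

static void sp_task_side(void)
{
	if (atomic_load_explicit(&interrupt_occurred,
				 memory_order_acquire)) {  /* flag first */
		atomic_store_explicit(&interrupt_occurred, 0,
				      memory_order_relaxed);
		printf("sp task sees index %d\n", status_block_index);
	}
}

int main(void)
{
	isr_side();
	sp_task_side();
	return 0;
}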
+ */ + bnx2x_schedule_sp_task(bp); return IRQ_HANDLED; } @@ -5101,7 +5311,6 @@ void bnx2x_drv_pulse(struct bnx2x *bp) bp->fw_drv_pulse_wr_seq); } - static void bnx2x_timer(unsigned long data) { struct bnx2x *bp = (struct bnx2x *) data; @@ -5109,7 +5318,8 @@ static void bnx2x_timer(unsigned long data) if (!netif_running(bp->dev)) return; - if (!BP_NOMCP(bp)) { + if (IS_PF(bp) && + !BP_NOMCP(bp)) { int mb_idx = BP_FW_MB_IDX(bp); u32 drv_pulse; u32 mcp_pulse; @@ -5136,6 +5346,10 @@ static void bnx2x_timer(unsigned long data) if (bp->state == BNX2X_STATE_OPEN) bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); + /* sample pf vf bulletin board for new posts from pf */ + if (IS_VF(bp)) + bnx2x_sample_bulletin(bp); + mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -5278,7 +5492,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; } -static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, +void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, u8 vf_valid, int fw_sb_id, int igu_sb_id) { int igu_seg_id; @@ -5334,7 +5548,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); - /* write indecies to HW */ + /* write indices to HW - PCI guarantees endianity of regpairs */ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); } @@ -5422,6 +5636,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) bnx2x_zero_sp_sb(bp); + /* PCI guarantees endianity of regpairs */ sp_sb_data.state = SB_ENABLED; sp_sb_data.host_sb_addr.lo = U64_LO(section); sp_sb_data.host_sb_addr.hi = U64_HI(section); @@ -5478,13 +5693,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); } - /* called with netif_addr_lock_bh() */ -void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, - unsigned long rx_mode_flags, - unsigned long rx_accept_flags, - unsigned long tx_accept_flags, - unsigned long ramrod_flags) +int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags) { struct bnx2x_rx_mode_ramrod_params ramrod_param; int rc; @@ -5514,22 +5728,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, rc = bnx2x_config_rx_mode(bp, &ramrod_param); if (rc < 0) { BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); - return; + return rc; } + + return 0; } -/* called with netif_addr_lock_bh() */ -void bnx2x_set_storm_rx_mode(struct bnx2x *bp) +static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, + unsigned long *rx_accept_flags, + unsigned long *tx_accept_flags) { - unsigned long rx_mode_flags = 0, ramrod_flags = 0; - unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + /* Clear the flags first */ + *rx_accept_flags = 0; + *tx_accept_flags = 0; - if (!NO_FCOE(bp)) - - /* Configure rx_mode of FCoE Queue */ - __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); - - switch (bp->rx_mode) { + switch (rx_mode) { case BNX2X_RX_MODE_NONE: /* * 'drop all' supersedes any accept flags that may have been @@ -5537,25 +5750,25 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) */ break; case BNX2X_RX_MODE_NORMAL: - __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags); + 
__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ - __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); break; case BNX2X_RX_MODE_ALLMULTI: - __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ - __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); break; case BNX2X_RX_MODE_PROMISC: @@ -5563,36 +5776,57 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) * should receive matched and unmatched (in resolution of port) * unicast packets. */ - __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); if (IS_MF_SI(bp)) - __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags); else - __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); break; default: - BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); - return; + BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); + return -EINVAL; } + /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ if (bp->rx_mode != BNX2X_RX_MODE_NONE) { - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); } + return 0; +} + +/* called with netif_addr_lock_bh() */ +int bnx2x_set_storm_rx_mode(struct bnx2x *bp) +{ + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + int rc; + + if (!NO_FCOE(bp)) + /* Configure rx_mode of FCoE Queue */ + __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); + + rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, + &tx_accept_flags); + if (rc) + return rc; + __set_bit(RAMROD_RX, &ramrod_flags); __set_bit(RAMROD_TX, &ramrod_flags); - bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, - tx_accept_flags, ramrod_flags); + return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, + rx_accept_flags, tx_accept_flags, + ramrod_flags); } static void bnx2x_init_internal_common(struct bnx2x *bp) @@ -5699,6 
+5933,13 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) cids[cos] = fp->txdata_ptr[cos]->cid; } + /* nothing more for vf to do here */ + if (IS_VF(bp)) + return; + + bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, + fp->fw_sb_id, fp->igu_sb_id); + bnx2x_update_fpsb_idx(fp); bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), q_type); @@ -5708,13 +5949,10 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) */ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); - DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", - fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, - fp->igu_sb_id); - bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, - fp->fw_sb_id, fp->igu_sb_id); - - bnx2x_update_fpsb_idx(fp); + DP(NETIF_MSG_IFUP, + "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); } static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) @@ -5786,17 +6024,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) for_each_eth_queue(bp, i) bnx2x_init_eth_fp(bp, i); + + /* ensure status block indices were read */ + rmb(); + bnx2x_init_rx_rings(bp); + bnx2x_init_tx_rings(bp); + + if (IS_VF(bp)) + return; + /* Initialize MOD_ABS interrupts */ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, bp->common.shmem_base, bp->common.shmem2_base, BP_PORT(bp)); - /* ensure status block indices were read */ - rmb(); bnx2x_init_def_sb(bp); bnx2x_update_dsb_idx(bp); - bnx2x_init_rx_rings(bp); - bnx2x_init_tx_rings(bp); bnx2x_init_sp_ring(bp); bnx2x_init_eq_ring(bp); bnx2x_init_internal(bp, load_code); @@ -6236,49 +6479,6 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); } -static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) -{ - u32 offset = 0; - - if (CHIP_IS_E1(bp)) - return; - if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX)) - return; - - switch (BP_ABS_FUNC(bp)) { - case 0: - offset = PXP2_REG_PGL_PRETEND_FUNC_F0; - break; - case 1: - offset = PXP2_REG_PGL_PRETEND_FUNC_F1; - break; - case 2: - offset = PXP2_REG_PGL_PRETEND_FUNC_F2; - break; - case 3: - offset = PXP2_REG_PGL_PRETEND_FUNC_F3; - break; - case 4: - offset = PXP2_REG_PGL_PRETEND_FUNC_F4; - break; - case 5: - offset = PXP2_REG_PGL_PRETEND_FUNC_F5; - break; - case 6: - offset = PXP2_REG_PGL_PRETEND_FUNC_F6; - break; - case 7: - offset = PXP2_REG_PGL_PRETEND_FUNC_F7; - break; - default: - return; - } - - REG_WR(bp, offset, pretend_func_num); - REG_RD(bp, offset); - DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); -} - void bnx2x_pf_disable(struct bnx2x *bp) { u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); @@ -6322,7 +6522,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); /* - * take the UNDI lock to protect undi_unload flow from accessing + * take the RESET lock to protect undi_unload flow from accessing * registers while we're resetting the chip */ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); @@ -6452,7 +6652,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) * queues with "old" ILT addresses. * c. PF enable in the PGLC. * d. Clear the was_error of the PF in the PGLC. 
(could have - * occured while driver was down) + * occurred while driver was down) * e. PF enable in the CFC (WEAK + STRONG) * f. Timers scan enable * 3. PF driver unload flow: @@ -6493,7 +6693,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) /* Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th - * vnic (this code assumes existance of the vnic) + * vnic (this code assumes existence of the vnic) * * both steps performed by call to bnx2x_ilt_client_init_op() * with dummy TM client @@ -6510,7 +6710,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); } - REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); @@ -6535,6 +6734,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); + bnx2x_iov_init_dmae(bp); + /* clean the DMAE memory */ bp->dmae_ready = 1; bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); @@ -6991,7 +7192,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) } } - /* If SPIO5 is set to generate interrupts, enable it for this port */ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); if (val & MISC_SPIO_SPIO5) { @@ -7020,15 +7220,14 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) REG_WR_DMAE(bp, reg, wb_write, 2); } -static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, - u8 idu_sb_id, bool is_Pf) +void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) { u32 data, ctl, cnt = 100; u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; u32 sb_bit = 1 << (idu_sb_id%32); - u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; + u32 func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; /* Not supported in BC mode */ @@ -7219,8 +7418,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) /* FLR cleanup - hmmm */ if (!CHIP_IS_E1x(bp)) { rc = bnx2x_pf_flr_clnup(bp); - if (rc) + if (rc) { + bnx2x_fw_dump(bp); return rc; + } } /* set MSI reconfigure capability */ @@ -7237,12 +7438,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) ilt = BP_ILT(bp); cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + if (IS_SRIOV(bp)) + cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS; + cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); + + /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes + * those of the VFs, so start line should be reset + */ + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(bp); i++) { ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; ilt->lines[cdu_ilt_start + i].page_mapping = bp->context[i].cxt_mapping; ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; } + bnx2x_ilt_init_op(bp, INITOP_SET); if (!CONFIGURE_NIC_MODE(bp)) { @@ -7315,6 +7525,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_TM, init_phase); bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + + bnx2x_iov_init_dq(bp); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); bnx2x_init_block(bp, BLOCK_PRS, init_phase); bnx2x_init_block(bp, BLOCK_TSDM, init_phase); @@ -7523,10 +7736,6 @@ void bnx2x_free_mem(struct bnx2x *bp) { int i; - /* fastpath */ - bnx2x_free_fp_mem(bp); - /* end of fastpath */ - BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, sizeof(struct host_sp_status_block)); @@ -7547,69 +7756,11 @@ void bnx2x_free_mem(struct bnx2x *bp) BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, BCM_PAGE_SIZE * NUM_EQ_PAGES); -} - -static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) -{ - int num_groups; - int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; - - /* number of queues for statistics is number of eth queues + FCoE */ - u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; - - /* Total number of FW statistics requests = - * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats + - * num of queues - */ - bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; - - - /* Request is built from stats_query_header and an array of - * stats_query_cmd_group each of which contains - * STATS_QUERY_CMD_COUNT rules. The real number or requests is - * configured in the stats_query_header. - */ - num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) + - (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0); - - bp->fw_stats_req_sz = sizeof(struct stats_query_header) + - num_groups * sizeof(struct stats_query_cmd_group); - - /* Data for statistics requests + stats_conter - * - * stats_counter holds per-STORM counters that are incremented - * when STORM has finished with the current request. - * - * memory for FCoE offloaded statistics are counted anyway, - * even if they will not be sent. 
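/*
 * Illustrative sketch only -- not part of the patch. The firmware-
 * statistics sizing removed above rounds fw_stats_num up to whole
 * stats_query_cmd_group blocks with "quotient plus one if remainder".
 * The same ceiling division can be written either way, as this
 * standalone check shows (group size below is made up).
 */
#include <assert.h>
#include <stdio.h>

#define STATS_QUERY_CMD_COUNT_SKETCH 16

static unsigned int groups_div_mod(unsigned int n, unsigned int per_group)
{
	return n / per_group + ((n % per_group) ? 1 : 0);
}

static unsigned int groups_round_up(unsigned int n, unsigned int per_group)
{
	return (n + per_group - 1) / per_group;   /* DIV_ROUND_UP form */
}

int main(void)
{
	unsigned int n;

	for (n = 0; n <= 64; n++)
		assert(groups_div_mod(n, STATS_QUERY_CMD_COUNT_SKETCH) ==
		       groups_round_up(n, STATS_QUERY_CMD_COUNT_SKETCH));

	printf("35 requests -> %u groups\n",
	       groups_div_mod(35, STATS_QUERY_CMD_COUNT_SKETCH));
	return 0;
}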
- */ - bp->fw_stats_data_sz = sizeof(struct per_port_stats) + - sizeof(struct per_pf_stats) + - sizeof(struct fcoe_statistics_params) + - sizeof(struct per_queue_stats) * num_queue_stats + - sizeof(struct stats_counter); - - BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); - /* Set shortcuts */ - bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; - bp->fw_stats_req_mapping = bp->fw_stats_mapping; - - bp->fw_stats_data = (struct bnx2x_fw_stats_data *) - ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); - - bp->fw_stats_data_mapping = bp->fw_stats_mapping + - bp->fw_stats_req_sz; - return 0; - -alloc_mem_err: - BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); - BNX2X_ERR("Can't allocate memory\n"); - return -ENOMEM; + bnx2x_iov_free_mem(bp); } + int bnx2x_alloc_mem_cnic(struct bnx2x *bp) { if (!CHIP_IS_E1x(bp)) @@ -7655,10 +7806,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp) BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); - /* Allocated memory for FW statistics */ - if (bnx2x_alloc_fw_stats_mem(bp)) - goto alloc_mem_err; - /* Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT * functions because CDU differs in few aspects: @@ -7687,6 +7834,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp) if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) goto alloc_mem_err; + if (bnx2x_iov_alloc_mem(bp)) + goto alloc_mem_err; + /* Slow path ring */ BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); @@ -7694,13 +7844,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp) BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, BCM_PAGE_SIZE * NUM_EQ_PAGES); - - /* fastpath */ - /* need to be done at the end, since it's self adjusting to amount - * of memory available for RSS queues - */ - if (bnx2x_alloc_fp_mem(bp)) - goto alloc_mem_err; return 0; alloc_mem_err: @@ -7803,43 +7946,53 @@ int bnx2x_setup_leading(struct bnx2x *bp) * * In case of MSI-X it will also try to enable MSI-X. */ -void bnx2x_set_int_mode(struct bnx2x *bp) +int bnx2x_set_int_mode(struct bnx2x *bp) { + int rc = 0; + + if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) + return -EINVAL; + switch (int_mode) { - case INT_MODE_MSI: + case BNX2X_INT_MODE_MSIX: + /* attempt to enable msix */ + rc = bnx2x_enable_msix(bp); + + /* msix attained */ + if (!rc) + return 0; + + /* vfs use only msix */ + if (rc && IS_VF(bp)) + return rc; + + /* failed to enable multiple MSI-X */ + BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", + bp->num_queues, + 1 + bp->num_cnic_queues); + + /* falling through... */ + case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); + /* falling through... 
*/ - case INT_MODE_INTx: + case BNX2X_INT_MODE_INTX: bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; BNX2X_DEV_INFO("set number of queues to 1\n"); break; default: - /* if we can't use MSI-X we only need one fp, - * so try to enable MSI-X with the requested number of fp's - * and fallback to MSI or legacy INTx with one fp - */ - if (bnx2x_enable_msix(bp) || - bp->flags & USING_SINGLE_MSIX_FLAG) { - /* failed to enable multiple MSI-X */ - BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", - bp->num_queues, - 1 + bp->num_cnic_queues); - - bp->num_queues = 1 + bp->num_cnic_queues; - - /* Try to enable MSI */ - if (!(bp->flags & USING_SINGLE_MSIX_FLAG) && - !(bp->flags & DISABLE_MSI_FLAG)) - bnx2x_enable_msi(bp); - } - break; + BNX2X_DEV_INFO("unknown value in int_mode module parameter\n"); + return -EINVAL; } + return 0; } -/* must be called prioir to any HW initializations */ +/* must be called prior to any HW initializations */ static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) { + if (IS_SRIOV(bp)) + return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS; return L2_ILT_LINES(bp); } @@ -8222,8 +8375,8 @@ static void bnx2x_reset_func(struct bnx2x *bp) /* SP SB */ REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), - SB_DISABLED); + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), + SB_DISABLED); for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), @@ -8524,7 +8677,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) } /* Give HW time to discard old tx messages */ - usleep_range(1000, 1000); + usleep_range(1000, 2000); /* Clean all ETH MACs */ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, @@ -8562,6 +8715,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) netif_addr_unlock_bh(bp->dev); + bnx2x_iov_chip_cleanup(bp); /* @@ -8947,7 +9101,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) if (pend_bits == 0) break; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } while (cnt-- > 0); if (cnt <= 0) { @@ -8964,8 +9118,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) int cnt = 1000; u32 val = 0; u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; - u32 tags_63_32 = 0; - + u32 tags_63_32 = 0; /* Empty the Tetris buffer, wait for 1s */ do { @@ -8983,7 +9136,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) (pgl_exp_rom2 == 0xffffffff) && (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) break; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } while (cnt-- > 0); if (cnt <= 0) { @@ -9016,7 +9169,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) /* Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. 
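 *
 * (The recurring usleep_range(1000, 1000) -> usleep_range(1000, 2000)
 *  substitutions in this patch follow the usual kernel guidance for
 *  usleep_range(): give it a real min..max window so the hrtimer code has
 *  room to coalesce wakeups; a zero-width range leaves it no slack at all.)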
*/ - usleep_range(1000, 1000); + usleep_range(1000, 2000); /* Prepare to chip reset: */ /* MCP */ @@ -9299,8 +9452,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) rtnl_lock(); - if (!netif_running(bp->dev)) - goto sp_rtnl_exit; + if (!netif_running(bp->dev)) { + rtnl_unlock(); + return; + } /* if stop on error is defined no recovery flows should be executed */ #ifdef BNX2X_STOP_ON_ERROR @@ -9319,7 +9474,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bnx2x_parity_recover(bp); - goto sp_rtnl_exit; + rtnl_unlock(); + return; } if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { @@ -9333,7 +9489,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_nic_load(bp, LOAD_NORMAL); - goto sp_rtnl_exit; + rtnl_unlock(); + return; } #ifdef BNX2X_STOP_ON_ERROR sp_rtnl_not_reset: @@ -9351,13 +9508,33 @@ sp_rtnl_not_reset: DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); netif_device_detach(bp->dev); bnx2x_close(bp->dev); + rtnl_unlock(); + return; } -sp_rtnl_exit: + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, + "sending set mcast vf pf channel message from rtnl sp-task\n"); + bnx2x_vfpf_set_mcast(bp->dev); + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, + &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, + "sending set storm rx mode vf pf channel message from rtnl sp-task\n"); + bnx2x_vfpf_storm_rx_mode(bp); + } + + /* work which needs rtnl lock not-taken (as it takes the lock itself and + * can be called from other contexts as well) + */ rtnl_unlock(); -} -/* end of nic load/unload */ + /* enable SR-IOV if applicable */ + if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, + &bp->sp_rtnl_state)) + bnx2x_enable_sriov(bp); +} static void bnx2x_period_task(struct work_struct *work) { @@ -9394,43 +9571,13 @@ period_task_exit: * Init service functions */ -static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) +u32 bnx2x_get_pretend_reg(struct bnx2x *bp) { u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; return base + (BP_ABS_FUNC(bp)) * stride; } -static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) -{ - u32 reg = bnx2x_get_pretend_reg(bp); - - /* Flush all outstanding writes */ - mmiowb(); - - /* Pretend to be function 0 */ - REG_WR(bp, reg, 0); - REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ - - /* From now we are in the "like-E1" mode */ - bnx2x_int_disable(bp); - - /* Flush all outstanding writes */ - mmiowb(); - - /* Restore the original function */ - REG_WR(bp, reg, BP_ABS_FUNC(bp)); - REG_RD(bp, reg); -} - -static inline void bnx2x_undi_int_disable(struct bnx2x *bp) -{ - if (CHIP_IS_E1(bp)) - bnx2x_int_disable(bp); - else - bnx2x_undi_int_disable_e1h(bp); -} - static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, struct bnx2x_mac_vals *vals) { @@ -9658,11 +9805,13 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) if (bnx2x_prev_is_path_marked(bp)) return bnx2x_prev_mcp_done(bp); + BNX2X_DEV_INFO("Path is unmarked\n"); + /* If function has FLR capabilities, and existing FW version matches * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ - rc = bnx2x_test_firmware_version(bp, false); + rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION); if (!rc) { /* fw version is good */ @@ -9718,7 +9867,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) /* Check if the UNDI driver was 
previously loaded * UNDI driver initializes CID offset for normal bell to 0x7 */ - reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST); if (tmp_reg == 0x7) { @@ -9726,6 +9874,8 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) prev_undi = true; /* clear the UNDI indication */ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); + /* clear possible idle check errors */ + REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); } } /* wait until BRB is empty */ @@ -9792,7 +9942,8 @@ static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp) if (!CHIP_IS_E1x(bp)) { u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); + DP(BNX2X_MSG_SP, + "'was error' bit was found to be set in pglueb upon startup. Clearing\n"); REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); } @@ -9834,7 +9985,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp) REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); } - do { /* Lock MCP using an unload request */ fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); @@ -10401,10 +10551,10 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) { - mac_hi = cpu_to_be16(mac_hi); - mac_lo = cpu_to_be32(mac_lo); - memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); - memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); + __be16 mac_hi_be = cpu_to_be16(mac_hi); + __be32 mac_lo_be = cpu_to_be32(mac_lo); + memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); + memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); } static void bnx2x_get_port_hwinfo(struct bnx2x *bp) @@ -10440,6 +10590,13 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->wol = (!(bp->flags & NO_WOL_FLAG) && (config & PORT_FEATURE_WOL_ENABLED)); + if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == + PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) + bp->flags |= NO_ISCSI_FLAG; + if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == + PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) + bp->flags |= NO_FCOE_FLAG; + BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", bp->link_params.lane_config, bp->link_params.speed_cap_mask[0], @@ -10547,21 +10704,21 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) /* Port info */ bp->cnic_eth_dev.fcoe_wwn_port_name_hi = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. fcoe_wwn_port_name_upper); bp->cnic_eth_dev.fcoe_wwn_port_name_lo = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. fcoe_wwn_port_name_lower); /* Node info */ bp->cnic_eth_dev.fcoe_wwn_node_name_hi = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. fcoe_wwn_node_name_upper); bp->cnic_eth_dev.fcoe_wwn_node_name_lo = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. 
fcoe_wwn_node_name_lower); } else if (!IS_MF_SD(bp)) { /* @@ -10659,7 +10816,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) /* Zero primary MAC configuration */ memset(bp->dev->dev_addr, 0, ETH_ALEN); - if (IS_MF_FCOE_AFEX(bp)) + if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp)) /* use FIP MAC as primary MAC */ memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); @@ -10722,7 +10879,6 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) } memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) dev_err(&bp->pdev->dev, @@ -10787,7 +10943,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { tout--; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { @@ -11125,9 +11281,13 @@ static int bnx2x_init_bp(struct bnx2x *bp) INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); - rc = bnx2x_get_hwinfo(bp); - if (rc) - return rc; + if (IS_PF(bp)) { + rc = bnx2x_get_hwinfo(bp); + if (rc) + return rc; + } else { + random_ether_addr(bp->dev->dev_addr); + } bnx2x_set_modes_bitmap(bp); @@ -11140,7 +11300,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) func = BP_FUNC(bp); /* need to reset chip if undi was active */ - if (!BP_NOMCP(bp)) { + if (IS_PF(bp) && !BP_NOMCP(bp)) { /* init fw_seq */ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & @@ -11177,6 +11337,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->mrrs = mrrs; bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; + if (IS_VF(bp)) + bp->rx_ring_size = MAX_RX_AVAIL; /* make sure that the numbers are in the right granularity */ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; @@ -11205,12 +11367,18 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->cnic_base_cl_id = FP_SB_MAX_E2; /* multiple tx priority */ - if (CHIP_IS_E1x(bp)) + if (IS_VF(bp)) + bp->max_cos = 1; + else if (CHIP_IS_E1x(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E1X; - if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) + else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; - if (CHIP_IS_E3B0(bp)) + else if (CHIP_IS_E3B0(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; + else + BNX2X_ERR("unknown chip %x revision %x\n", + CHIP_NUM(bp), CHIP_REV(bp)); + BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); /* We need at least one default status block for slow-path events, * second status block for the L2 queue, and a third status block for @@ -11234,6 +11402,26 @@ static int bnx2x_init_bp(struct bnx2x *bp) * net_device service functions */ +static int bnx2x_open_epilog(struct bnx2x *bp) +{ + /* Enable sriov via delayed work. This must be done via delayed work + * because it causes the probe of the vf devices to be run, which invoke + * register_netdevice which must have rtnl lock taken. As we are holding + * the lock right now, that could only work if the probe would not take + * the lock. However, as the probe of the vf may be called from other + * contexts as well (such as passthrough to vm failes) it can't assume + * the lock is being held for it. Using delayed work here allows the + * probe code to simply take the lock (i.e. wait for it to be released + * if it is being held). 
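 *
 * The same "publish a flag, then kick the rtnl worker" idiom is used
 * elsewhere in this patch for the VF multicast and storm-rx-mode requests.
 * As a minimal sketch, reusing the sp_rtnl_state word and sp_rtnl_task
 * worker this driver already has (the body below is the concrete instance
 * for SR-IOV enablement):
 *
 *	smp_mb__before_clear_bit();
 *	set_bit(BNX2X_SP_RTNL_<some event>, &bp->sp_rtnl_state);
 *	smp_mb__after_clear_bit();
 *	schedule_delayed_work(&bp->sp_rtnl_task, 0);
 *
 * bnx2x_sp_rtnl_task() then runs under rtnl_lock() and test_and_clear_bit()s
 * each flag before acting on it, so requests raised from contexts that must
 * not sleep end up handled where sleeping and taking rtnl are allowed.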
+ */ + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + + return 0; +} + /* called with rtnl_lock */ static int bnx2x_open(struct net_device *dev) { @@ -11241,6 +11429,7 @@ static int bnx2x_open(struct net_device *dev) bool global = false; int other_engine = BP_PATH(bp) ? 0 : 1; bool other_load_status, load_status; + int rc; bp->stats_init = true; @@ -11248,53 +11437,57 @@ static int bnx2x_open(struct net_device *dev) bnx2x_set_power_state(bp, PCI_D0); - other_load_status = bnx2x_get_load_status(bp, other_engine); - load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); - - /* - * If parity had happen during the unload, then attentions + /* If parity had happen during the unload, then attentions * and/or RECOVERY_IN_PROGRES may still be set. In this case we * want the first function loaded on the current engine to * complete the recovery. + * Parity recovery is only relevant for PF driver. */ - if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || - bnx2x_chk_parity_attn(bp, &global, true)) - do { - /* - * If there are attentions and they are in a global - * blocks, set the GLOBAL_RESET bit regardless whether - * it will be this function that will complete the - * recovery or not. - */ - if (global) - bnx2x_set_reset_global(bp); + if (IS_PF(bp)) { + other_load_status = bnx2x_get_load_status(bp, other_engine); + load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); + if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || + bnx2x_chk_parity_attn(bp, &global, true)) { + do { + /* If there are attentions and they are in a + * global blocks, set the GLOBAL_RESET bit + * regardless whether it will be this function + * that will complete the recovery or not. + */ + if (global) + bnx2x_set_reset_global(bp); - /* - * Only the first function on the current engine should - * try to recover in open. In case of attentions in - * global blocks only the first in the chip should try - * to recover. - */ - if ((!load_status && - (!global || !other_load_status)) && - bnx2x_trylock_leader_lock(bp) && - !bnx2x_leader_reset(bp)) { - netdev_info(bp->dev, "Recovered in open\n"); - break; - } + /* Only the first function on the current + * engine should try to recover in open. In case + * of attentions in global blocks only the first + * in the chip should try to recover. + */ + if ((!load_status && + (!global || !other_load_status)) && + bnx2x_trylock_leader_lock(bp) && + !bnx2x_leader_reset(bp)) { + netdev_info(bp->dev, + "Recovered in open\n"); + break; + } - /* recovery has failed... */ - bnx2x_set_power_state(bp, PCI_D3hot); - bp->recovery_state = BNX2X_RECOVERY_FAILED; + /* recovery has failed... */ + bnx2x_set_power_state(bp, PCI_D3hot); + bp->recovery_state = BNX2X_RECOVERY_FAILED; - BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" - "If you still see this message after a few retries then power cycle is required.\n"); + BNX2X_ERR("Recovery flow hasn't been properly completed yet. 
Try again later.\n" + "If you still see this message after a few retries then power cycle is required.\n"); - return -EAGAIN; - } while (0); + return -EAGAIN; + } while (0); + } + } bp->recovery_state = BNX2X_RECOVERY_DONE; - return bnx2x_nic_load(bp, LOAD_OPEN); + rc = bnx2x_nic_load(bp, LOAD_OPEN); + if (rc) + return rc; + return bnx2x_open_epilog(bp); } /* called with rtnl_lock */ @@ -11428,7 +11621,6 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) return rc; } - /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ void bnx2x_set_rx_mode(struct net_device *dev) { @@ -11449,12 +11641,25 @@ void bnx2x_set_rx_mode(struct net_device *dev) CHIP_IS_E1(bp))) rx_mode = BNX2X_RX_MODE_ALLMULTI; else { - /* some multicasts */ - if (bnx2x_set_mc_list(bp) < 0) - rx_mode = BNX2X_RX_MODE_ALLMULTI; + if (IS_PF(bp)) { + /* some multicasts */ + if (bnx2x_set_mc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_ALLMULTI; - if (bnx2x_set_uc_list(bp) < 0) - rx_mode = BNX2X_RX_MODE_PROMISC; + if (bnx2x_set_uc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_PROMISC; + } else { + /* configuring mcast to a vf involves sleeping (when we + * wait for the pf's response). Since this function is + * called from non sleepable context we must schedule + * a work item for this purpose + */ + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_VFPF_MCAST, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } } bp->rx_mode = rx_mode; @@ -11468,7 +11673,20 @@ void bnx2x_set_rx_mode(struct net_device *dev) return; } - bnx2x_set_storm_rx_mode(bp); + if (IS_PF(bp)) { + bnx2x_set_storm_rx_mode(bp); + } else { + /* configuring rx mode to storms in a vf involves sleeping (when + * we wait for the pf's response). Since this function is + * called from non sleepable context we must schedule + * a work item for this purpose + */ + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } } /* called with rtnl_lock */ @@ -11571,7 +11789,9 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_poll_controller = poll_bnx2x, #endif .ndo_setup_tc = bnx2x_setup_tc, - +#ifdef CONFIG_BNX2X_SRIOV + .ndo_set_vf_mac = bnx2x_set_vf_mac, +#endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, #endif @@ -11595,10 +11815,9 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp) return 0; } -static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev, - unsigned long board_type) +static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, + struct net_device *dev, unsigned long board_type) { - struct bnx2x *bp; int rc; u32 pci_cfg_dword; bool chip_is_e1x = (board_type == BCM57710 || @@ -11606,11 +11825,9 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev, board_type == BCM57711E); SET_NETDEV_DEV(dev, &pdev->dev); - bp = netdev_priv(dev); bp->dev = dev; bp->pdev = pdev; - bp->flags = 0; rc = pci_enable_device(pdev); if (rc) { @@ -11626,9 +11843,8 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev, goto err_out_disable; } - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&bp->pdev->dev, "Cannot find second PCI device" - " base address, aborting\n"); + if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); rc = -ENODEV; goto err_out_disable; } @@ -11653,12 +11869,14 @@ static int 
bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev, pci_save_state(pdev); } - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (bp->pm_cap == 0) { - dev_err(&bp->pdev->dev, - "Cannot find power management capability, aborting\n"); - rc = -EIO; - goto err_out_release; + if (IS_PF(bp)) { + bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (bp->pm_cap == 0) { + dev_err(&bp->pdev->dev, + "Cannot find power management capability, aborting\n"); + rc = -EIO; + goto err_out_release; + } } if (!pci_is_pcie(pdev)) { @@ -11690,13 +11908,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev, * support Physical Device Assignment where kernel BDF maybe arbitrary * (depending on hypervisor). */ - if (chip_is_e1x) + if (chip_is_e1x) { bp->pf_num = PCI_FUNC(pdev->devfn); - else {/* chip is E2/3*/ + } else { + /* chip is E2/3*/ pci_read_config_dword(bp->pdev, PCICFG_ME_REGISTER, &pci_cfg_dword); bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> - ME_REG_ABS_PF_NUM_SHIFT); + ME_REG_ABS_PF_NUM_SHIFT); } BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); @@ -11709,25 +11928,28 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev, * Clean the following indirect addresses for all functions since it * is not used by the driver. */ - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); + if (IS_PF(bp)) { + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); + + if (chip_is_e1x) { + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + } - if (chip_is_e1x) { - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + /* Enable internal target-read (in case we are probed after PF + * FLR). Must be done prior to any BAR read access. Only for + * 57712 and up + */ + if (!chip_is_e1x) + REG_WR(bp, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); } - /* - * Enable internal target-read (in case we are probed after PF FLR). - * Must be done prior to any BAR read access. 
Only for 57712 and up - */ - if (!chip_is_e1x) - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; @@ -11778,8 +12000,9 @@ err_out: static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed) { - u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); + u32 val = 0; + pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; /* return value of 1=2.5GHz 2=5GHz */ @@ -11792,7 +12015,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp) struct bnx2x_fw_file_hdr *fw_hdr; struct bnx2x_fw_file_section *sections; u32 offset, len, num_ops; - u16 *ops_offsets; + __be16 *ops_offsets; int i; const u8 *fw_ver; @@ -11817,7 +12040,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp) /* Likewise for the init_ops offsets */ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); - ops_offsets = (u16 *)(firmware->data + offset); + ops_offsets = (__force __be16 *)(firmware->data + offset); num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { @@ -12044,8 +12267,12 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) { int cid_count = BNX2X_L2_MAX_CID(bp); + if (IS_SRIOV(bp)) + cid_count += BNX2X_VF_CIDS; + if (CNIC_SUPPORT(bp)) cid_count += CNIC_CID_MAX; + return roundup(cid_count, QM_CID_ROUND); } @@ -12056,10 +12283,10 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) * */ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, - int cnic_cnt) + int cnic_cnt, bool is_vf) { - int pos; - u16 control; + int pos, index; + u16 control = 0; pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); @@ -12067,85 +12294,114 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, * If MSI-X is not supported - return number of SBs needed to support * one fast path queue: one FP queue + SB for CNIC */ - if (!pos) + if (!pos) { + dev_info(&pdev->dev, "no msix capability found\n"); return 1 + cnic_cnt; + } + dev_info(&pdev->dev, "msix capability found\n"); /* * The value in the PCI configuration space is the index of the last * entry, namely one less than the actual size of the table, which is * exactly what we want to return from this function: number of all SBs * without the default SB. + * For VFs there is no default SB, then we return (index+1). */ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); - return control & PCI_MSIX_FLAGS_QSIZE; -} -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *); + index = control & PCI_MSIX_FLAGS_QSIZE; -static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *dev = NULL; - struct bnx2x *bp; - int pcie_width, pcie_speed; - int rc, max_non_def_sbs; - int rx_count, tx_count, rss_count, doorbell_size; - int cnic_cnt; - /* - * An estimated maximum supported CoS number according to the chip - * version. - * We will try to roughly estimate the maximum number of CoSes this chip - * may support in order to minimize the memory allocated for Tx - * netdev_queue's. This number will be accurately calculated during the - * initialization of bp->max_cos based on the chip versions AND chip - * revision in the bnx2x_init_bp(). - */ - u8 max_cos_est = 0; + return is_vf ? 
index + 1 : index; +} - switch (ent->driver_data) { +static int set_max_cos_est(int chip_id) +{ + switch (chip_id) { case BCM57710: case BCM57711: case BCM57711E: - max_cos_est = BNX2X_MULTI_TX_COS_E1X; - break; - + return BNX2X_MULTI_TX_COS_E1X; case BCM57712: case BCM57712_MF: - max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0; - break; - + case BCM57712_VF: + return BNX2X_MULTI_TX_COS_E2_E3A0; case BCM57800: case BCM57800_MF: + case BCM57800_VF: case BCM57810: case BCM57810_MF: - case BCM57840_O: case BCM57840_4_10: case BCM57840_2_20: + case BCM57840_O: case BCM57840_MFO: + case BCM57810_VF: case BCM57840_MF: + case BCM57840_VF: case BCM57811: case BCM57811_MF: - max_cos_est = BNX2X_MULTI_TX_COS_E3B0; - break; - + case BCM57811_VF: + return BNX2X_MULTI_TX_COS_E3B0; + return 1; default: - pr_err("Unknown board_type (%ld), aborting\n", - ent->driver_data); + pr_err("Unknown board_type (%d), aborting\n", chip_id); return -ENODEV; } +} + +static int set_is_vf(int chip_id) +{ + switch (chip_id) { + case BCM57712_VF: + case BCM57800_VF: + case BCM57810_VF: + case BCM57840_VF: + case BCM57811_VF: + return true; + default: + return false; + } +} - cnic_cnt = 1; - max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); +struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); - WARN_ON(!max_non_def_sbs); +static int bnx2x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct bnx2x *bp; + int pcie_width, pcie_speed; + int rc, max_non_def_sbs; + int rx_count, tx_count, rss_count, doorbell_size; + int max_cos_est; + bool is_vf; + int cnic_cnt; + + /* An estimated maximum supported CoS number according to the chip + * version. + * We will try to roughly estimate the maximum number of CoSes this chip + * may support in order to minimize the memory allocated for Tx + * netdev_queue's. This number will be accurately calculated during the + * initialization of bp->max_cos based on the chip versions AND chip + * revision in the bnx2x_init_bp(). + */ + max_cos_est = set_max_cos_est(ent->driver_data); + if (max_cos_est < 0) + return max_cos_est; + is_vf = set_is_vf(ent->driver_data); + cnic_cnt = is_vf ? 0 : 1; + + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf); /* Maximum number of RSS queues: one IGU SB goes to CNIC */ - rss_count = max_non_def_sbs - cnic_cnt; + rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt; + + if (rss_count < 1) + return -EINVAL; /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ rx_count = rss_count + cnic_cnt; - /* - * Maximum number of netdev Tx queues: + /* Maximum number of netdev Tx queues: * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 */ tx_count = rss_count * max_cos_est + cnic_cnt; @@ -12157,42 +12413,55 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp = netdev_priv(dev); + bp->flags = 0; + if (is_vf) + bp->flags |= IS_VF_FLAG; + bp->igu_sb_cnt = max_non_def_sbs; + bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; bp->msg_enable = debug; bp->cnic_support = cnic_cnt; bp->cnic_probe = bnx2x_cnic_probe; pci_set_drvdata(pdev, dev); - rc = bnx2x_init_dev(pdev, dev, ent->driver_data); + rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); if (rc < 0) { free_netdev(dev); return rc; } + BNX2X_DEV_INFO("This is a %s function\n", + IS_PF(bp) ? "physical" : "virtual"); BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? 
"on" : "off"); - BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); - + BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs); BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", - tx_count, rx_count); + tx_count, rx_count); rc = bnx2x_init_bp(bp); if (rc) goto init_one_exit; - /* - * Map doorbels here as we need the real value of bp->max_cos which - * is initialized in bnx2x_init_bp(). + /* Map doorbells here as we need the real value of bp->max_cos which + * is initialized in bnx2x_init_bp() to determine the number of + * l2 connections. */ - doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); - if (doorbell_size > pci_resource_len(pdev, 2)) { - dev_err(&bp->pdev->dev, - "Cannot map doorbells, bar size too small, aborting\n"); - rc = -ENOMEM; - goto init_one_exit; + if (IS_VF(bp)) { + bnx2x_vf_map_doorbells(bp); + rc = bnx2x_vf_pci_alloc(bp); + if (rc) + goto init_one_exit; + } else { + doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); + if (doorbell_size > pci_resource_len(pdev, 2)) { + dev_err(&bp->pdev->dev, + "Cannot map doorbells, bar size too small, aborting\n"); + rc = -ENOMEM; + goto init_one_exit; + } + bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + doorbell_size); } - bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), - doorbell_size); if (!bp->doorbells) { dev_err(&bp->pdev->dev, "Cannot map doorbell space, aborting\n"); @@ -12200,8 +12469,25 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_one_exit; } + if (IS_VF(bp)) { + rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); + if (rc) + goto init_one_exit; + } + + /* Enable SRIOV if capability found in configuration space. + * Once the generic SR-IOV framework makes it in from the + * pci tree this will be revised, to allow dynamic control + * over the number of VFs. Right now, change the num of vfs + * param below to enable SR-IOV. + */ + rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/); + if (rc) + goto init_one_exit; + /* calc qm_cid_count */ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); + BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); /* disable FCOE L2 queue for E1x*/ if (CHIP_IS_E1x(bp)) @@ -12223,13 +12509,20 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Configure interrupt mode: try to enable MSI-X/MSI if * needed. 
*/ - bnx2x_set_int_mode(bp); + rc = bnx2x_set_int_mode(bp); + if (rc) { + dev_err(&pdev->dev, "Cannot set interrupts\n"); + goto init_one_exit; + } + BNX2X_DEV_INFO("set interrupts successfully\n"); + /* register the net device */ rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); goto init_one_exit; } + BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); if (!NO_FCOE(bp)) { @@ -12240,6 +12533,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); + BNX2X_DEV_INFO("got pcie width %d and speed %d\n", + pcie_width, pcie_speed); BNX2X_DEV_INFO( "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", @@ -12257,7 +12552,7 @@ init_one_exit: if (bp->regview) iounmap(bp->regview); - if (bp->doorbells) + if (IS_PF(bp) && bp->doorbells) iounmap(bp->doorbells); free_netdev(dev); @@ -12297,25 +12592,37 @@ static void bnx2x_remove_one(struct pci_dev *pdev) unregister_netdev(dev); /* Power on: we can't let PCI layer write to us while we are in D3 */ - bnx2x_set_power_state(bp, PCI_D0); + if (IS_PF(bp)) + bnx2x_set_power_state(bp, PCI_D0); /* Disable MSI/MSI-X */ bnx2x_disable_msi(bp); /* Power off */ - bnx2x_set_power_state(bp, PCI_D3hot); + if (IS_PF(bp)) + bnx2x_set_power_state(bp, PCI_D3hot); /* Make sure RESET task is not scheduled before continuing */ cancel_delayed_work_sync(&bp->sp_rtnl_task); + bnx2x_iov_remove_one(bp); + + /* send message via vfpf channel to release the resources of this vf */ + if (IS_VF(bp)) + bnx2x_vfpf_release(bp); + if (bp->regview) iounmap(bp->regview); - if (bp->doorbells) - iounmap(bp->doorbells); - - bnx2x_release_firmware(bp); + /* for vf doorbells are part of the regview and were unmapped along with + * it. FW is only loaded by PF. + */ + if (IS_PF(bp)) { + if (bp->doorbells) + iounmap(bp->doorbells); + bnx2x_release_firmware(bp); + } bnx2x_free_mem_bp(bp); free_netdev(dev); @@ -13103,4 +13410,36 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) return cp; } +u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + u32 offset = BAR_USTRORM_INTMEM; + if (IS_VF(bp)) + return bnx2x_vf_ustorm_prods_offset(bp, fp); + else if (!CHIP_IS_E1x(bp)) + offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + else + offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); + + return offset; +} + +/* called only on E1H or E2. + * When pretending to be PF, the pretend value is the function number 0...7 + * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID + * combination + */ +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) +{ + u32 pretend_reg; + + if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX) + return -1; + + /* get my own pretend register */ + pretend_reg = bnx2x_get_pretend_reg(bp); + REG_WR(bp, pretend_reg, pretend_func_val); + REG_RD(bp, pretend_reg); + return 0; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h index ddd5106ad2f9..caf1aef651eb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h @@ -1,6 +1,6 @@ /* bnx2x_mfw_req.h: Broadcom Everest network driver. 
* - * Copyright (c) 2012 Broadcom Corporation + * Copyright (c) 2012-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index bc2f65b32649..791eb2d53011 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1,6 +1,6 @@ /* bnx2x_reg.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -825,6 +825,7 @@ /* [RW 28] The value sent to CM header in the case of CFC load error. */ #define DORQ_REG_ERR_CMHEAD 0x170058 #define DORQ_REG_IF_EN 0x170004 +#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec #define DORQ_REG_MODE_ACT 0x170008 /* [RW 5] The normal mode CID extraction offset. */ #define DORQ_REG_NORM_CID_OFST 0x17002c @@ -847,6 +848,22 @@ writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The read reads this written value. */ #define DORQ_REG_RSP_INIT_CRD 0x170048 +#define DORQ_REG_RSPB_CRD_CNT 0x1700b0 +#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0 +#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4 +#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4 +#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4 +#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8 +/* [RW 10] VF type validation mask value */ +#define DORQ_REG_VF_TYPE_MASK_0 0x170218 +/* [RW 17] VF type validation Min MCID value */ +#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8 +/* [RW 17] VF type validation Max MCID value */ +#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298 +/* [RW 10] VF type validation comp value */ +#define DORQ_REG_VF_TYPE_VALUE_0 0x170258 +#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340 + /* [RW 4] Initial activity counter value on the load request; when the shortcut is done. */ #define DORQ_REG_SHRT_ACT_CNT 0x170070 @@ -859,6 +876,7 @@ #define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) #define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) #define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) +#define DORQ_REG_VF_USAGE_CNT 0x170320 #define HC_REG_AGG_INT_0 0x108050 #define HC_REG_AGG_INT_1 0x108054 #define HC_REG_ATTN_BIT 0x108120 @@ -2136,6 +2154,8 @@ /* [R 32] Interrupt register #0 read */ #define NIG_REG_NIG_INT_STS_0 0x103b0 #define NIG_REG_NIG_INT_STS_1 0x103c0 +/* [RC 32] Interrupt register #0 read clear */ +#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4 /* [R 32] Legacy E1 and E1H location for parity error mask register. */ #define NIG_REG_NIG_PRTY_MASK 0x103dc /* [RW 32] Parity mask register #0 read/write */ @@ -2571,6 +2591,7 @@ current task in process). */ #define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c #define PBF_REG_DISABLE_PF 0x1402e8 +#define PBF_REG_DISABLE_VF 0x1402ec /* [RW 18] For port 0: For each client that is subject to WFQ (the * corresponding bit is 1); indicates to which of the credit registers this * client is mapped. For clients which are not credit blocked; their mapping @@ -3708,6 +3729,10 @@ #define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c /* [WB 160] Used for initialization of the inbound interrupts memory */ #define PXP_REG_HST_INBOUND_INT 0x103800 +/* [RW 7] Indirect access to the permission table. 
The fields are : {Valid; + * VFID[5:0]} + */ +#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400 /* [RW 32] Interrupt mask register #0 read/write */ #define PXP_REG_PXP_INT_MASK_0 0x103074 #define PXP_REG_PXP_INT_MASK_1 0x103084 @@ -5966,6 +5991,7 @@ #define HW_LOCK_RESOURCE_SPIO 2 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19) #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) #define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) @@ -6305,6 +6331,15 @@ #define PCI_PM_DATA_B 0x414 #define PCI_ID_VAL1 0x434 #define PCI_ID_VAL2 0x438 +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs \ + * programmed for each PF. + * Since registers from 0x000-0x7ff are split across functions, each PF will + * have the same location for the same 4 bits + */ #define PXPCS_TL_CONTROL_5 0x814 #define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ @@ -6554,6 +6589,27 @@ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */ +#define PXP_VF_ADDR_IGU_START 0 +#define PXP_VF_ADDR_IGU_SIZE 0x3000 +#define PXP_VF_ADDR_IGU_END\ + ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1) + +#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000 +#define PXP_VF_ADDR_USDM_QUEUES_SIZE\ + (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE) +#define PXP_VF_ADDR_USDM_QUEUES_END\ + ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1) + +#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600 +#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE) +#define PXP_VF_ADDR_CSDM_GLOBAL_END\ + ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1) + +#define PXP_VF_ADDR_DB_START 0x7c00 +#define PXP_VF_ADDR_DB_SIZE 0x200 +#define PXP_VF_ADDR_DB_END\ + ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1) + #define MDIO_REG_BANK_CL73_IEEEB0 0x0 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 09b625e0fdaa..7306416bc90d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1,6 +1,6 @@ /* bnx2x_sp.c: Broadcom Everest network driver. 
* - * Copyright (c) 2011-2012 Broadcom Corporation + * Copyright (c) 2011-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -325,7 +325,7 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state, return 0; } - usleep_range(1000, 1000); + usleep_range(1000, 2000); if (bp->panic) return -EIO; @@ -707,7 +707,8 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, struct eth_classify_header *hdr, int rule_cnt) { - hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); + hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) | + (type << BNX2X_SWCID_SHIFT)); hdr->rule_cnt = (u8)rule_cnt; } @@ -813,8 +814,9 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, hdr->length = 1; hdr->offset = (u8)cam_offset; - hdr->client_id = 0xff; - hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); + hdr->client_id = cpu_to_le16(0xff); + hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (type << BNX2X_SWCID_SHIFT)); } static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, @@ -903,7 +905,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, (struct eth_classify_rules_ramrod_data *)(raw->rdata); int rule_cnt = rule_idx + 1; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - int cmd = elem->cmd_data.vlan_mac.cmd; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; @@ -953,7 +955,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, (struct eth_classify_rules_ramrod_data *)(raw->rdata); int rule_cnt = rule_idx + 1; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - int cmd = elem->cmd_data.vlan_mac.cmd; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; @@ -1407,7 +1409,7 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp, /* Wait until there are no pending commands */ if (!bnx2x_exe_queue_empty(exeq)) - usleep_range(1000, 1000); + usleep_range(1000, 2000); else return 0; } @@ -1442,7 +1444,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp, if (cqe->message.error) return -EINVAL; - /* Run the next bulk of pending commands if requeted */ + /* Run the next bulk of pending commands if requested */ if (test_bit(RAMROD_CONT, ramrod_flags)) { rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); if (rc < 0) @@ -1532,7 +1534,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem( bool restore, struct bnx2x_vlan_mac_registry_elem **re) { - int cmd = elem->cmd_data.vlan_mac.cmd; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; struct bnx2x_vlan_mac_registry_elem *reg_elem; /* Allocate a new registry element if needed. 
*/ @@ -1591,7 +1593,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); struct bnx2x_vlan_mac_registry_elem *reg_elem; - int cmd; + enum bnx2x_vlan_mac_cmd cmd; /* * If DRIVER_ONLY execution is requested, cleanup a registry @@ -2103,7 +2105,7 @@ static inline void __storm_memset_mac_filters(struct bnx2x *bp, static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, struct bnx2x_rx_mode_ramrod_params *p) { - /* update the bp MAC filter structure */ + /* update the bp MAC filter structure */ u32 mask = (1 << p->cl_id); struct tstorm_eth_mac_filter_config *mac_filters = @@ -2166,7 +2168,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, mac_filters->unmatched_unicast & ~mask; DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" - "accp_mcast 0x%x\naccp_bcast 0x%x\n", + "accp_mcast 0x%x\naccp_bcast 0x%x\n", mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, mac_filters->bcast_accept_all); @@ -2186,12 +2188,12 @@ static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, struct eth_classify_header *hdr, u8 rule_cnt) { - hdr->echo = cid; + hdr->echo = cpu_to_le32(cid); hdr->rule_cnt = rule_cnt; } static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, - unsigned long accept_flags, + unsigned long *accept_flags, struct eth_filter_rules_cmd *cmd, bool clear_accept_all) { @@ -2201,33 +2203,33 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - if (accept_flags) { - if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) - state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; - } + if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + } - if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { - state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; - state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - } - if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) - state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) { + state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + } - if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; - } - if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) - state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags)) + state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + + if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; } + if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags)) + state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue 
*/ if (clear_accept_all) { state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; @@ -2260,8 +2262,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_TX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, - &(data->rules[rule_idx++]), false); + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, + &(data->rules[rule_idx++]), + false); } /* Rx */ @@ -2272,8 +2275,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, - &(data->rules[rule_idx++]), false); + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, + &(data->rules[rule_idx++]), + false); } @@ -2293,9 +2297,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_TX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, - &(data->rules[rule_idx++]), + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, + &(data->rules[rule_idx]), true); + rule_idx++; } /* Rx */ @@ -2306,9 +2311,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, - &(data->rules[rule_idx++]), + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, + &(data->rules[rule_idx]), true); + rule_idx++; } } @@ -2429,7 +2435,7 @@ static int bnx2x_mcast_wait(struct bnx2x *bp, static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { int total_sz; struct bnx2x_pending_mcast_cmd *new_cmd; @@ -2561,7 +2567,7 @@ static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, struct bnx2x_mcast_obj *o, int idx, union bnx2x_mcast_config_data *cfg_data, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_raw_obj *r = &o->raw; struct eth_multicast_rules_ramrod_data *data = @@ -2625,7 +2631,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e2( int *rdata_idx) { int cur_bin, cnt = *rdata_idx; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; /* go through the registry and configure the bins from it */ for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; @@ -2657,7 +2663,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, { struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; int cnt = *line_idx; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, link) { @@ -2780,7 +2786,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, int *line_idx) { struct bnx2x_mcast_list_elem *mlist_pos; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; int cnt = *line_idx; list_for_each_entry(mlist_pos, &p->mcast_list, link) { @@ -2790,7 +2796,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, cnt++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - mlist_pos->mac); + mlist_pos->mac); } *line_idx = cnt; @@ -2827,7 +2833,8 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, * Returns number of lines filled in the ramrod data in total. 
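 *
 * (The bare "int cmd" parameters of these multicast helpers are retyped
 *  below as "enum bnx2x_mcast_cmd cmd"; the command values referenced in
 *  this file are BNX2X_MCAST_CMD_ADD, _CONT, _DEL and _RESTORE, with the
 *  enum itself declared in bnx2x_sp.h later in this patch. The enum makes
 *  the accepted command set explicit in the prototypes rather than leaving
 *  it as a bare int.)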
*/ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd, int start_cnt) { struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -2861,7 +2868,7 @@ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, static int bnx2x_mcast_validate_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; int reg_sz = o->get_registry_size(o); @@ -2930,8 +2937,9 @@ static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, struct eth_multicast_rules_ramrod_data *data = (struct eth_multicast_rules_ramrod_data *)(r->rdata); - data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | - (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); + data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << + BNX2X_SWCID_SHIFT)); data->header.rule_cnt = len; } @@ -2965,7 +2973,7 @@ static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, static int bnx2x_mcast_setup_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -3051,7 +3059,7 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { /* Mark, that there is a work to do */ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) @@ -3085,7 +3093,7 @@ static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, BNX2X_57711_SET_MC_FILTER(mc_filter, bit); DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n", - mlist_pos->mac, bit); + mlist_pos->mac, bit); /* bookkeeping... */ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, @@ -3113,7 +3121,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, */ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { int i; struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -3167,7 +3175,7 @@ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, static int bnx2x_mcast_validate_e1(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; int reg_sz = o->get_registry_size(o); @@ -3240,7 +3248,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp, static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, struct bnx2x_mcast_obj *o, int idx, union bnx2x_mcast_config_data *cfg_data, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_raw_obj *r = &o->raw; struct mac_configuration_cmd *data = @@ -3284,9 +3292,10 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, BNX2X_MAX_MULTICAST*(1 + r->func_id)); data->hdr.offset = offset; - data->hdr.client_id = 0xff; - data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | - (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); + data->hdr.client_id = cpu_to_le16(0xff); + data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << + BNX2X_SWCID_SHIFT)); data->hdr.length = len; } @@ -3309,7 +3318,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( { struct bnx2x_mcast_mac_elem *elem; int i = 0; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; /* go through the registry and configure the MACs from it. 
*/ list_for_each_entry(elem, &o->registry.exact_match.macs, link) { @@ -3319,7 +3328,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( i++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - cfg_data.mac); + cfg_data.mac); } *rdata_idx = i; @@ -3334,7 +3343,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1( struct bnx2x_pending_mcast_cmd *cmd_pos; struct bnx2x_mcast_mac_elem *pmac_pos; struct bnx2x_mcast_obj *o = p->mcast_obj; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; int cnt = 0; @@ -3355,7 +3364,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1( cnt++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - pmac_pos->mac); + pmac_pos->mac); } break; @@ -3458,7 +3467,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, static int bnx2x_mcast_setup_e1(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; struct bnx2x_raw_obj *raw = &o->raw; @@ -3562,7 +3571,7 @@ static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, int bnx2x_config_mcast(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; struct bnx2x_raw_obj *r = &o->raw; @@ -4085,8 +4094,8 @@ static int bnx2x_setup_rss(struct bnx2x *bp, DP(BNX2X_MSG_SP, "Configuring RSS\n"); /* Set an echo field */ - data->echo = (r->cid & BNX2X_SWCID_MASK) | - (r->state << BNX2X_SWCID_SHIFT); + data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (r->state << BNX2X_SWCID_SHIFT)); /* RSS mode */ if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) @@ -4237,11 +4246,16 @@ int bnx2x_queue_state_change(struct bnx2x *bp, unsigned long *pending = &o->pending; /* Check that the requested transition is legal */ - if (o->check_transition(bp, o, params)) + rc = o->check_transition(bp, o, params); + if (rc) { + BNX2X_ERR("check transition returned an error. rc %d\n", rc); return -EINVAL; + } /* Set "pending" bit */ + DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending); pending_bit = o->set_pending(o, params); + DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending); /* Don't send a command if only driver cleanup was requested */ if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) @@ -5025,8 +5039,11 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, * Don't allow a next state transition if we are in the middle of * the previous one. 
*/ - if (o->pending) + if (o->pending) { + BNX2X_ERR("Blocking transition since pending was %lx\n", + o->pending); return -EBUSY; + } switch (state) { case BNX2X_Q_STATE_RESET: @@ -5199,6 +5216,27 @@ void bnx2x_init_queue_obj(struct bnx2x *bp, obj->set_pending = bnx2x_queue_set_pending; } +/* return a queue object's logical state*/ +int bnx2x_get_q_logical_state(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj) +{ + switch (obj->state) { + case BNX2X_Q_STATE_ACTIVE: + case BNX2X_Q_STATE_MULTI_COS: + return BNX2X_Q_LOGICAL_STATE_ACTIVE; + case BNX2X_Q_STATE_RESET: + case BNX2X_Q_STATE_INITIALIZED: + case BNX2X_Q_STATE_MCOS_TERMINATED: + case BNX2X_Q_STATE_INACTIVE: + case BNX2X_Q_STATE_STOPPED: + case BNX2X_Q_STATE_TERMINATED: + case BNX2X_Q_STATE_FLRED: + return BNX2X_Q_LOGICAL_STATE_STOPPED; + default: + return -EINVAL; + } +} + /********************** Function state object *********************************/ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, struct bnx2x_func_sp_obj *o) @@ -5631,9 +5669,9 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data with provided parameters */ - rdata->function_mode = (u8)start_params->mf_mode; - rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); - rdata->path_id = BP_PATH(bp); + rdata->function_mode = (u8)start_params->mf_mode; + rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); + rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; /* @@ -5716,21 +5754,20 @@ inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, struct bnx2x_func_sp_obj *o = params->f_obj; struct afex_vif_list_ramrod_data *rdata = (struct afex_vif_list_ramrod_data *)o->afex_rdata; - struct bnx2x_func_afex_viflists_params *afex_viflist_params = + struct bnx2x_func_afex_viflists_params *afex_vif_params = ¶ms->params.afex_viflists; u64 *p_rdata = (u64 *)rdata; memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data with provided parameters */ - rdata->vif_list_index = afex_viflist_params->vif_list_index; - rdata->func_bit_map = afex_viflist_params->func_bit_map; - rdata->afex_vif_list_command = - afex_viflist_params->afex_vif_list_command; - rdata->func_to_clear = afex_viflist_params->func_to_clear; + rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index); + rdata->func_bit_map = afex_vif_params->func_bit_map; + rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; + rdata->func_to_clear = afex_vif_params->func_to_clear; /* send in echo type of sub command */ - rdata->echo = afex_viflist_params->afex_vif_list_command; + rdata->echo = afex_vif_params->afex_vif_list_command; /* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index adbd91b1bdfc..ff907609b9fc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1,6 +1,6 @@ /* bnx2x_sp.h: Broadcom Everest network driver. 
* - * Copyright (c) 2011-2012 Broadcom Corporation + * Copyright (c) 2011-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -54,7 +54,7 @@ typedef enum { BNX2X_OBJ_TYPE_RX_TX, } bnx2x_obj_type; -/* Filtering states */ +/* Public slow path states */ enum { BNX2X_FILTER_MAC_PENDING, BNX2X_FILTER_VLAN_PENDING, @@ -524,7 +524,7 @@ struct bnx2x_mcast_ramrod_params { int mcast_list_len; }; -enum { +enum bnx2x_mcast_cmd { BNX2X_MCAST_CMD_ADD, BNX2X_MCAST_CMD_CONT, BNX2X_MCAST_CMD_DEL, @@ -573,7 +573,8 @@ struct bnx2x_mcast_obj { * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) */ int (*config_mcast)(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); /** * Fills the ramrod data during the RESTORE flow. @@ -590,11 +591,13 @@ struct bnx2x_mcast_obj { int start_bin, int *rdata_idx); int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); void (*set_one_rule)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, int idx, - union bnx2x_mcast_config_data *cfg_data, int cmd); + union bnx2x_mcast_config_data *cfg_data, + enum bnx2x_mcast_cmd cmd); /** Checks if there are more mcast MACs to be set or a previous * command is still pending. @@ -617,7 +620,8 @@ struct bnx2x_mcast_obj { * feasible. */ int (*validate)(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); /** * Restore the values of internal counters in case of a failure. @@ -776,6 +780,12 @@ enum bnx2x_q_state { BNX2X_Q_STATE_MAX, }; +/* Allowed Queue states */ +enum bnx2x_q_logical_state { + BNX2X_Q_LOGICAL_STATE_ACTIVE, + BNX2X_Q_LOGICAL_STATE_STOPPED, +}; + /* Allowed commands */ enum bnx2x_queue_cmd { BNX2X_Q_CMD_INIT, @@ -1261,6 +1271,9 @@ void bnx2x_init_queue_obj(struct bnx2x *bp, int bnx2x_queue_state_change(struct bnx2x *bp, struct bnx2x_queue_state_params *params); +int bnx2x_get_q_logical_state(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj); + /********************* VLAN-MAC ****************/ void bnx2x_init_mac_obj(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, @@ -1338,7 +1351,8 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, * completions. */ int bnx2x_config_mcast(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); /****************** CREDIT POOL ****************/ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c new file mode 100644 index 000000000000..6adfa2093581 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -0,0 +1,3198 @@ +/* bnx2x_sriov.c: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Shmulik Ravid <shmulikr@broadcom.com> + * Ariel Elior <ariele@broadcom.com> + * + */ +#include "bnx2x.h" +#include "bnx2x_init.h" +#include "bnx2x_cmn.h" +#include <linux/crc32.h> + +/* General service functions */ +static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); +} + +static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), + enable); +} + +int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) +{ + int idx; + + for_each_vf(bp, idx) + if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) + break; + return idx; +} + +static +struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) +{ + u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); + return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; +} + +static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, + u8 igu_sb_id, u8 segment, u16 index, u8 op, + u8 update) +{ + /* acking a VF sb through the PF - use the GRC */ + u32 ctl; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 func_encode = vf->abs_vfid; + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr_data); + REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); + mmiowb(); + barrier(); + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); +} +/* VFOP - VF slow-path operation support */ + +#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000 + +/* VFOP operations states */ +enum bnx2x_vfop_qctor_state { + BNX2X_VFOP_QCTOR_INIT, + BNX2X_VFOP_QCTOR_SETUP, + BNX2X_VFOP_QCTOR_INT_EN +}; + +enum bnx2x_vfop_qdtor_state { + BNX2X_VFOP_QDTOR_HALT, + BNX2X_VFOP_QDTOR_TERMINATE, + BNX2X_VFOP_QDTOR_CFCDEL, + BNX2X_VFOP_QDTOR_DONE +}; + +enum bnx2x_vfop_vlan_mac_state { + BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, + BNX2X_VFOP_VLAN_MAC_CLEAR, + BNX2X_VFOP_VLAN_MAC_CHK_DONE, + BNX2X_VFOP_MAC_CONFIG_LIST, + BNX2X_VFOP_VLAN_CONFIG_LIST, + BNX2X_VFOP_VLAN_CONFIG_LIST_0 +}; + +enum bnx2x_vfop_qsetup_state { + BNX2X_VFOP_QSETUP_CTOR, + BNX2X_VFOP_QSETUP_VLAN0, + 
BNX2X_VFOP_QSETUP_DONE +}; + +enum bnx2x_vfop_mcast_state { + BNX2X_VFOP_MCAST_DEL, + BNX2X_VFOP_MCAST_ADD, + BNX2X_VFOP_MCAST_CHK_DONE +}; +enum bnx2x_vfop_qflr_state { + BNX2X_VFOP_QFLR_CLR_VLAN, + BNX2X_VFOP_QFLR_CLR_MAC, + BNX2X_VFOP_QFLR_TERMINATE, + BNX2X_VFOP_QFLR_DONE +}; + +enum bnx2x_vfop_flr_state { + BNX2X_VFOP_FLR_QUEUES, + BNX2X_VFOP_FLR_HW +}; + +enum bnx2x_vfop_close_state { + BNX2X_VFOP_CLOSE_QUEUES, + BNX2X_VFOP_CLOSE_HW +}; + +enum bnx2x_vfop_rxmode_state { + BNX2X_VFOP_RXMODE_CONFIG, + BNX2X_VFOP_RXMODE_DONE +}; + +enum bnx2x_vfop_qteardown_state { + BNX2X_VFOP_QTEARDOWN_RXMODE, + BNX2X_VFOP_QTEARDOWN_CLR_VLAN, + BNX2X_VFOP_QTEARDOWN_CLR_MAC, + BNX2X_VFOP_QTEARDOWN_QDTOR, + BNX2X_VFOP_QTEARDOWN_DONE +}; + +#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) + +void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx) +{ + DP(BNX2X_MSG_IOV, + "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", + vf->abs_vfid, + q_idx, + sb_idx, + init_params->tx.sb_cq_index, + init_params->tx.hc_rate, + setup_params->flags, + setup_params->txq_params.traffic_type); +} + +void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx) +{ + struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" + "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", + vf->abs_vfid, + q_idx, + sb_idx, + init_params->rx.sb_cq_index, + init_params->rx.hc_rate, + setup_params->gen_params.mtu, + rxq_params->buf_sz, + rxq_params->sge_buf_sz, + rxq_params->max_sges_pkt, + rxq_params->tpa_agg_sz, + setup_params->flags, + rxq_params->drop_flags, + rxq_params->cache_line_log); +} + +void bnx2x_vfop_qctor_prep(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q, + struct bnx2x_vfop_qctor_params *p, + unsigned long q_type) +{ + struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; + struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; + + /* INIT */ + + /* Enable host coalescing in the transition to INIT state */ + if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) + __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); + + if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) + __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags); + + /* FW SB ID */ + init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + + /* context */ + init_p->cxts[0] = q->cxt; + + /* SETUP */ + + /* Setup-op general parameters */ + setup_p->gen_params.spcl_id = vf->sp_cl_id; + setup_p->gen_params.stat_id = vfq_stat_id(vf, q); + + /* Setup-op pause params: + * Nothing to do, the pause thresholds are set by default to 0 which + * effectively turns off the feature for this queue. 
We don't want + * one queue (VF) to interfering with another queue (another VF) + */ + if (vf->cfg_flags & VF_CFG_FW_FC) + BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", + vf->abs_vfid); + /* Setup-op flags: + * collect statistics, zero statistics, local-switching, security, + * OV for Flex10, RSS and MCAST for leading + */ + if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); + + /* for VFs, enable tx switching, bd coherency, and mac address + * anti-spoofing + */ + __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); + __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); + __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); + + if (vfq_is_leading(q)) { + __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags); + __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); + } + + /* Setup-op rx parameters */ + if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { + struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; + + rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); + rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); + rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); + + if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) + rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; + } + + /* Setup-op tx parameters */ + if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { + setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; + setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + } +} + +/* VFOP queue construction */ +static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; + struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; + enum bnx2x_vfop_qctor_state state = vfop->state; + + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_QCTOR_INIT: + + /* has this queue already been opened? */ + if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == + BNX2X_Q_LOGICAL_STATE_ACTIVE) { + DP(BNX2X_MSG_IOV, + "Entered qctor but queue was already up. 
Aborting gracefully\n"); + goto op_done; + } + + /* next state */ + vfop->state = BNX2X_VFOP_QCTOR_SETUP; + + q_params->cmd = BNX2X_Q_CMD_INIT; + vfop->rc = bnx2x_queue_state_change(bp, q_params); + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + + case BNX2X_VFOP_QCTOR_SETUP: + /* next state */ + vfop->state = BNX2X_VFOP_QCTOR_INT_EN; + + /* copy pre-prepared setup params to the queue-state params */ + vfop->op_p->qctor.qstate.params.setup = + vfop->op_p->qctor.prep_qsetup; + + q_params->cmd = BNX2X_Q_CMD_SETUP; + vfop->rc = bnx2x_queue_state_change(bp, q_params); + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + + case BNX2X_VFOP_QCTOR_INT_EN: + + /* enable interrupts */ + bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), + USTORM_ID, 0, IGU_INT_ENABLE, 0); + goto op_done; + default: + bnx2x_vfop_default(state); + } +op_err: + BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n", + vf->abs_vfid, args->qid, q_params->cmd, vfop->rc); +op_done: + bnx2x_vfop_end(bp, vf, vfop); +op_pending: + return; +} + +static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + + vfop->args.qctor.qid = qid; + vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); + + bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, + bnx2x_vfop_qctor, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, + cmd->block); + } + return -ENOMEM; +} + +/* VFOP queue destruction */ +static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor; + struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; + enum bnx2x_vfop_qdtor_state state = vfop->state; + + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_QDTOR_HALT: + + /* has this queue already been stopped? */ + if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == + BNX2X_Q_LOGICAL_STATE_STOPPED) { + DP(BNX2X_MSG_IOV, + "Entered qdtor but queue was already stopped. 
Aborting gracefully\n"); + goto op_done; + } + + /* next state */ + vfop->state = BNX2X_VFOP_QDTOR_TERMINATE; + + q_params->cmd = BNX2X_Q_CMD_HALT; + vfop->rc = bnx2x_queue_state_change(bp, q_params); + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + + case BNX2X_VFOP_QDTOR_TERMINATE: + /* next state */ + vfop->state = BNX2X_VFOP_QDTOR_CFCDEL; + + q_params->cmd = BNX2X_Q_CMD_TERMINATE; + vfop->rc = bnx2x_queue_state_change(bp, q_params); + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + + case BNX2X_VFOP_QDTOR_CFCDEL: + /* next state */ + vfop->state = BNX2X_VFOP_QDTOR_DONE; + + q_params->cmd = BNX2X_Q_CMD_CFC_DEL; + vfop->rc = bnx2x_queue_state_change(bp, q_params); + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); +op_err: + BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n", + vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc); +op_done: + case BNX2X_VFOP_QDTOR_DONE: + /* invalidate the context */ + qdtor->cxt->ustorm_ag_context.cdu_usage = 0; + qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + bnx2x_vfop_end(bp, vf, vfop); + return; + default: + bnx2x_vfop_default(state); + } +op_pending: + return; +} + +static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + struct bnx2x_queue_state_params *qstate = + &vf->op_params.qctor.qstate; + + memset(qstate, 0, sizeof(*qstate)); + qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); + + vfop->args.qdtor.qid = qid; + vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt); + + bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT, + bnx2x_vfop_qdtor, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, + cmd->block); + } + DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid); + return -ENOMEM; +} + +static void +bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) +{ + struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + if (vf) { + if (!vf_sb_count(vf)) + vf->igu_base_id = igu_sb_id; + ++vf_sb_count(vf); + } +} + +/* VFOP MAC/VLAN helpers */ +static inline void bnx2x_vfop_credit(struct bnx2x *bp, + struct bnx2x_vfop *vfop, + struct bnx2x_vlan_mac_obj *obj) +{ + struct bnx2x_vfop_args_filters *args = &vfop->args.filters; + + /* update credit only if there is no error + * and a valid credit counter + */ + if (!vfop->rc && args->credit) { + int cnt = 0; + struct list_head *pos; + + list_for_each(pos, &obj->head) + cnt++; + + atomic_set(args->credit, cnt); + } +} + +static int bnx2x_vfop_set_user_req(struct bnx2x *bp, + struct bnx2x_vfop_filter *pos, + struct bnx2x_vlan_mac_data *user_req) +{ + user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD : + BNX2X_VLAN_MAC_DEL; + + switch (pos->type) { + case BNX2X_VFOP_FILTER_MAC: + memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); + break; + case BNX2X_VFOP_FILTER_VLAN: + user_req->u.vlan.vlan = pos->vid; + break; + default: + BNX2X_ERR("Invalid filter type, skipping\n"); + return 1; + } + return 0; +} + +static int +bnx2x_vfop_config_vlan0(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *vlan_mac, + bool add) +{ + int rc; + + vlan_mac->user_req.cmd = add ? 
BNX2X_VLAN_MAC_ADD : + BNX2X_VLAN_MAC_DEL; + vlan_mac->user_req.u.vlan.vlan = 0; + + rc = bnx2x_config_vlan_mac(bp, vlan_mac); + if (rc == -EEXIST) + rc = 0; + return rc; +} + +static int bnx2x_vfop_config_list(struct bnx2x *bp, + struct bnx2x_vfop_filters *filters, + struct bnx2x_vlan_mac_ramrod_params *vlan_mac) +{ + struct bnx2x_vfop_filter *pos, *tmp; + struct list_head rollback_list, *filters_list = &filters->head; + struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; + int rc = 0, cnt = 0; + + INIT_LIST_HEAD(&rollback_list); + + list_for_each_entry_safe(pos, tmp, filters_list, link) { + if (bnx2x_vfop_set_user_req(bp, pos, user_req)) + continue; + + rc = bnx2x_config_vlan_mac(bp, vlan_mac); + if (rc >= 0) { + cnt += pos->add ? 1 : -1; + list_del(&pos->link); + list_add(&pos->link, &rollback_list); + rc = 0; + } else if (rc == -EEXIST) { + rc = 0; + } else { + BNX2X_ERR("Failed to add a new vlan_mac command\n"); + break; + } + } + + /* rollback if error or too many rules added */ + if (rc || cnt > filters->add_cnt) { + BNX2X_ERR("error or too many rules added. Performing rollback\n"); + list_for_each_entry_safe(pos, tmp, &rollback_list, link) { + pos->add = !pos->add; /* reverse op */ + bnx2x_vfop_set_user_req(bp, pos, user_req); + bnx2x_config_vlan_mac(bp, vlan_mac); + list_del(&pos->link); + } + cnt = 0; + if (!rc) + rc = -EINVAL; + } + filters->add_cnt = cnt; + return rc; +} + +/* VFOP set VLAN/MAC */ +static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; + struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; + struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; + + enum bnx2x_vfop_vlan_mac_state state = vfop->state; + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + bnx2x_vfop_reset_wq(vf); + + switch (state) { + case BNX2X_VFOP_VLAN_MAC_CLEAR: + /* next state */ + vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; + + /* do delete */ + vfop->rc = obj->delete_all(bp, obj, + &vlan_mac->user_req.vlan_mac_flags, + &vlan_mac->ramrod_flags); + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + + case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: + /* next state */ + vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; + + /* do config */ + vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); + if (vfop->rc == -EEXIST) + vfop->rc = 0; + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + + case BNX2X_VFOP_VLAN_MAC_CHK_DONE: + vfop->rc = !!obj->raw.check_pending(&obj->raw); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + + case BNX2X_VFOP_MAC_CONFIG_LIST: + /* next state */ + vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; + + /* do list config */ + vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); + if (vfop->rc) + goto op_err; + + set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); + vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + + case BNX2X_VFOP_VLAN_CONFIG_LIST: + /* next state */ + vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; + + /* remove vlan0 - could be no-op */ + vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); + if (vfop->rc) + goto op_err; + + /* Do vlan list config. 
if this operation fails we try to + * restore vlan0 to keep the queue is working order + */ + vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); + if (!vfop->rc) { + set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); + vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); + } + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ + + case BNX2X_VFOP_VLAN_CONFIG_LIST_0: + /* next state */ + vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; + + if (list_empty(&obj->head)) + /* add vlan0 */ + vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + + default: + bnx2x_vfop_default(state); + } +op_err: + BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); +op_done: + kfree(filters); + bnx2x_vfop_credit(bp, vfop, obj); + bnx2x_vfop_end(bp, vf, vfop); +op_pending: + return; +} + +struct bnx2x_vfop_vlan_mac_flags { + bool drv_only; + bool dont_consume; + bool single_cmd; + bool add; +}; + +static void +bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, + struct bnx2x_vfop_vlan_mac_flags *flags) +{ + struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; + + memset(ramrod, 0, sizeof(*ramrod)); + + /* ramrod flags */ + if (flags->drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); + if (flags->single_cmd) + set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); + + /* mac_vlan flags */ + if (flags->dont_consume) + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); + + /* cmd */ + ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; +} + +static inline void +bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, + struct bnx2x_vfop_vlan_mac_flags *flags) +{ + bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags); + set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags); +} + +static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid, bool drv_only) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + struct bnx2x_vfop_args_filters filters = { + .multi_filter = NULL, /* single */ + .credit = NULL, /* consume credit */ + }; + struct bnx2x_vfop_vlan_mac_flags flags = { + .drv_only = drv_only, + .dont_consume = (filters.credit != NULL), + .single_cmd = true, + .add = false /* don't care */, + }; + struct bnx2x_vlan_mac_ramrod_params *ramrod = + &vf->op_params.vlan_mac; + + /* set ramrod params */ + bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); + + /* set object */ + ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + + /* set extra args */ + vfop->args.filters = filters; + + bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, + bnx2x_vfop_vlan_mac, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, + cmd->block); + } + return -ENOMEM; +} + +int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + struct bnx2x_vfop_filters *macs, + int qid, bool drv_only) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + struct bnx2x_vfop_args_filters filters = { + .multi_filter = macs, + .credit = NULL, /* consume credit */ + }; + struct bnx2x_vfop_vlan_mac_flags flags = { + .drv_only = drv_only, + .dont_consume = (filters.credit != NULL), + .single_cmd = false, + .add = false, /* don't care since only the items in the + * filters list affect the sp operation, + * not the list itself + */ + }; + struct bnx2x_vlan_mac_ramrod_params *ramrod = + &vf->op_params.vlan_mac; + + /* set ramrod params */ + bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); + + /* 
set object */ + ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + + /* set extra args */ + filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; + vfop->args.filters = filters; + + bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, + bnx2x_vfop_vlan_mac, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, + cmd->block); + } + return -ENOMEM; +} + +int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid, u16 vid, bool add) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + struct bnx2x_vfop_args_filters filters = { + .multi_filter = NULL, /* single command */ + .credit = &bnx2x_vfq(vf, qid, vlan_count), + }; + struct bnx2x_vfop_vlan_mac_flags flags = { + .drv_only = false, + .dont_consume = (filters.credit != NULL), + .single_cmd = true, + .add = add, + }; + struct bnx2x_vlan_mac_ramrod_params *ramrod = + &vf->op_params.vlan_mac; + + /* set ramrod params */ + bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); + ramrod->user_req.u.vlan.vlan = vid; + + /* set object */ + ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + + /* set extra args */ + vfop->args.filters = filters; + + bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, + bnx2x_vfop_vlan_mac, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, + cmd->block); + } + return -ENOMEM; +} + +static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid, bool drv_only) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + struct bnx2x_vfop_args_filters filters = { + .multi_filter = NULL, /* single command */ + .credit = &bnx2x_vfq(vf, qid, vlan_count), + }; + struct bnx2x_vfop_vlan_mac_flags flags = { + .drv_only = drv_only, + .dont_consume = (filters.credit != NULL), + .single_cmd = true, + .add = false, /* don't care */ + }; + struct bnx2x_vlan_mac_ramrod_params *ramrod = + &vf->op_params.vlan_mac; + + /* set ramrod params */ + bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); + + /* set object */ + ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + + /* set extra args */ + vfop->args.filters = filters; + + bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, + bnx2x_vfop_vlan_mac, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, + cmd->block); + } + return -ENOMEM; +} + +int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + struct bnx2x_vfop_filters *vlans, + int qid, bool drv_only) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + struct bnx2x_vfop_args_filters filters = { + .multi_filter = vlans, + .credit = &bnx2x_vfq(vf, qid, vlan_count), + }; + struct bnx2x_vfop_vlan_mac_flags flags = { + .drv_only = drv_only, + .dont_consume = (filters.credit != NULL), + .single_cmd = false, + .add = false, /* don't care */ + }; + struct bnx2x_vlan_mac_ramrod_params *ramrod = + &vf->op_params.vlan_mac; + + /* set ramrod params */ + bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); + + /* set object */ + ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + + /* set extra args */ + filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) - + atomic_read(filters.credit); + + vfop->args.filters = filters; + + bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST, + bnx2x_vfop_vlan_mac, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, + cmd->block); + } + return -ENOMEM; +} + +/* VFOP queue setup (queue constructor + set vlan 0) */ +static void 
bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + int qid = vfop->args.qctor.qid; + enum bnx2x_vfop_qsetup_state state = vfop->state; + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vfop_qsetup, + .block = false, + }; + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_QSETUP_CTOR: + /* init the queue ctor command */ + vfop->state = BNX2X_VFOP_QSETUP_VLAN0; + vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); + if (vfop->rc) + goto op_err; + return; + + case BNX2X_VFOP_QSETUP_VLAN0: + /* skip if non-leading or FPGA/EMU*/ + if (qid) + goto op_done; + + /* init the queue set-vlan command (for vlan 0) */ + vfop->state = BNX2X_VFOP_QSETUP_DONE; + vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); + if (vfop->rc) + goto op_err; + return; +op_err: + BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); +op_done: + case BNX2X_VFOP_QSETUP_DONE: + bnx2x_vfop_end(bp, vf, vfop); + return; + default: + bnx2x_vfop_default(state); + } +} + +int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + vfop->args.qctor.qid = qid; + + bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, + bnx2x_vfop_qsetup, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, + cmd->block); + } + return -ENOMEM; +} + +/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ +static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + int qid = vfop->args.qx.qid; + enum bnx2x_vfop_qflr_state state = vfop->state; + struct bnx2x_queue_state_params *qstate; + struct bnx2x_vfop_cmd cmd; + + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); + + cmd.done = bnx2x_vfop_qflr; + cmd.block = false; + + switch (state) { + case BNX2X_VFOP_QFLR_CLR_VLAN: + /* vlan-clear-all: driver-only, don't consume credit */ + vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; + vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); + if (vfop->rc) + goto op_err; + return; + + case BNX2X_VFOP_QFLR_CLR_MAC: + /* mac-clear-all: driver only consume credit */ + vfop->state = BNX2X_VFOP_QFLR_TERMINATE; + vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); + DP(BNX2X_MSG_IOV, + "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", + vf->abs_vfid, vfop->rc); + if (vfop->rc) + goto op_err; + return; + + case BNX2X_VFOP_QFLR_TERMINATE: + qstate = &vfop->op_p->qctor.qstate; + memset(qstate , 0, sizeof(*qstate)); + qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); + vfop->state = BNX2X_VFOP_QFLR_DONE; + + DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", + vf->abs_vfid, qstate->q_obj->state); + + if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { + qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; + qstate->cmd = BNX2X_Q_CMD_TERMINATE; + vfop->rc = bnx2x_queue_state_change(bp, qstate); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); + } else { + goto op_done; + } + +op_err: + BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", + vf->abs_vfid, qid, vfop->rc); +op_done: + case BNX2X_VFOP_QFLR_DONE: + bnx2x_vfop_end(bp, vf, vfop); + return; + default: + bnx2x_vfop_default(state); + } +op_pending: + return; +} + +static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + 
struct bnx2x_vfop_cmd *cmd, + int qid) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + vfop->args.qx.qid = qid; + bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, + bnx2x_vfop_qflr, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, + cmd->block); + } + return -ENOMEM; +} + +/* VFOP multi-casts */ +static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; + struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; + struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; + enum bnx2x_vfop_mcast_state state = vfop->state; + int i; + + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_MCAST_DEL: + /* clear existing mcasts */ + vfop->state = BNX2X_VFOP_MCAST_ADD; + vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + + case BNX2X_VFOP_MCAST_ADD: + if (raw->check_pending(raw)) + goto op_pending; + + if (args->mc_num) { + /* update mcast list on the ramrod params */ + INIT_LIST_HEAD(&mcast->mcast_list); + for (i = 0; i < args->mc_num; i++) + list_add_tail(&(args->mc[i].link), + &mcast->mcast_list); + /* add new mcasts */ + vfop->state = BNX2X_VFOP_MCAST_CHK_DONE; + vfop->rc = bnx2x_config_mcast(bp, mcast, + BNX2X_MCAST_CMD_ADD); + } + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + + case BNX2X_VFOP_MCAST_CHK_DONE: + vfop->rc = raw->check_pending(raw) ? 1 : 0; + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); + default: + bnx2x_vfop_default(state); + } +op_err: + BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc); +op_done: + kfree(args->mc); + bnx2x_vfop_end(bp, vf, vfop); +op_pending: + return; +} + +int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + bnx2x_mac_addr_t *mcasts, + int mcast_num, bool drv_only) +{ + struct bnx2x_vfop *vfop = NULL; + size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem); + struct bnx2x_mcast_list_elem *mc = mc_sz ? 
kzalloc(mc_sz, GFP_KERNEL) : + NULL; + + if (!mc_sz || mc) { + vfop = bnx2x_vfop_add(bp, vf); + if (vfop) { + int i; + struct bnx2x_mcast_ramrod_params *ramrod = + &vf->op_params.mcast; + + /* set ramrod params */ + memset(ramrod, 0, sizeof(*ramrod)); + ramrod->mcast_obj = &vf->mcast_obj; + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, + &ramrod->ramrod_flags); + + /* copy mcasts pointers */ + vfop->args.mc_list.mc_num = mcast_num; + vfop->args.mc_list.mc = mc; + for (i = 0; i < mcast_num; i++) + mc[i].mac = mcasts[i]; + + bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL, + bnx2x_vfop_mcast, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast, + cmd->block); + } else { + kfree(mc); + } + } + return -ENOMEM; +} + +/* VFOP rx-mode */ +static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode; + enum bnx2x_vfop_rxmode_state state = vfop->state; + + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_RXMODE_CONFIG: + /* next state */ + vfop->state = BNX2X_VFOP_RXMODE_DONE; + + vfop->rc = bnx2x_config_rx_mode(bp, ramrod); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); +op_err: + BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc); +op_done: + case BNX2X_VFOP_RXMODE_DONE: + bnx2x_vfop_end(bp, vf, vfop); + return; + default: + bnx2x_vfop_default(state); + } +op_pending: + return; +} + +int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid, unsigned long accept_flags) +{ + struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + struct bnx2x_rx_mode_ramrod_params *ramrod = + &vf->op_params.rx_mode; + + memset(ramrod, 0, sizeof(*ramrod)); + + /* Prepare ramrod parameters */ + ramrod->cid = vfq->cid; + ramrod->cl_id = vfq_cl_id(vf, vfq); + ramrod->rx_mode_obj = &bp->rx_mode_obj; + ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); + + ramrod->rx_accept_flags = accept_flags; + ramrod->tx_accept_flags = accept_flags; + ramrod->pstate = &vf->filter_state; + ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + set_bit(RAMROD_RX, &ramrod->ramrod_flags); + set_bit(RAMROD_TX, &ramrod->ramrod_flags); + + ramrod->rdata = + bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); + ramrod->rdata_mapping = + bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); + + bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, + bnx2x_vfop_rxmode, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode, + cmd->block); + } + return -ENOMEM; +} + +/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs, + * queue destructor) + */ +static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + int qid = vfop->args.qx.qid; + enum bnx2x_vfop_qteardown_state state = vfop->state; + struct bnx2x_vfop_cmd cmd; + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + cmd.done = bnx2x_vfop_qdown; + cmd.block = false; + + switch (state) { + case BNX2X_VFOP_QTEARDOWN_RXMODE: + /* Drop all */ + vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN; + vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0); + if (vfop->rc) + goto op_err; + return; + + case BNX2X_VFOP_QTEARDOWN_CLR_VLAN: + /* vlan-clear-all: don't consume credit */ + 
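
The teardown handler here (bnx2x_vfop_qdown) follows the chained VFOP pattern used throughout this file: each case records the next state in vfop->state, launches an asynchronous sub-command whose completion callback (cmd.done) is the handler itself, and returns; when the sub-command completes, the handler is re-entered and resumes at the recorded state. Below is a minimal stand-alone sketch of that control flow under those assumptions; all names are illustrative and not the driver's, and completion is simulated synchronously.

/* Illustrative VFOP-style state machine; not part of the patch. */
enum ex_state { EX_CLR_VLAN, EX_CLR_MAC, EX_DONE };

struct ex_op {
	enum ex_state state;
	int rc;
};

static void ex_handler(struct ex_op *op);

/* stand-in for an async sub-command: it "completes" by calling back */
static void ex_subcmd(struct ex_op *op)
{
	op->rc = 0;
	ex_handler(op);		/* done-callback == the handler itself */
}

static void ex_handler(struct ex_op *op)
{
	if (op->rc)
		return;			/* error path (op_err in the driver) */

	switch (op->state) {
	case EX_CLR_VLAN:
		op->state = EX_CLR_MAC;	/* record next state */
		ex_subcmd(op);		/* launch sub-command, then return */
		return;
	case EX_CLR_MAC:
		op->state = EX_DONE;
		ex_subcmd(op);
		return;
	case EX_DONE:
		return;			/* bnx2x_vfop_end() equivalent */
	}
}
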
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
+		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
+		if (vfop->rc)
+			goto op_err;
+		return;
+
+	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
+		/* mac-clear-all: consume credit */
+		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
+		if (vfop->rc)
+			goto op_err;
+		return;
+
+	case BNX2X_VFOP_QTEARDOWN_QDTOR:
+		/* run the queue destruction flow */
+		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
+		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
+		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
+		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
+		DP(BNX2X_MSG_IOV, "returned from cmd\n");
+		if (vfop->rc)
+			goto op_err;
+		return;
+op_err:
+	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
+		  vf->abs_vfid, qid, vfop->rc);
+
+	case BNX2X_VFOP_QTEARDOWN_DONE:
+		bnx2x_vfop_end(bp, vf, vfop);
+		return;
+	default:
+		bnx2x_vfop_default(state);
+	}
+}
+
+int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
+			 struct bnx2x_virtf *vf,
+			 struct bnx2x_vfop_cmd *cmd,
+			 int qid)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		vfop->args.qx.qid = qid;
+		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
+				 bnx2x_vfop_qdown, cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
+					     cmd->block);
+	}
+
+	return -ENOMEM;
+}
+
+/* VF enable primitives
+ * when pretend is required the caller is responsible
+ * for calling pretend prior to calling these routines
+ */
+
+/* internal vf enable - until vf is enabled internally all transactions
+ * are blocked. this routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ?
1 : 0); +} + +/* clears vf error in all semi blocks */ +static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) +{ + REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); +} + +static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) +{ + u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; + u32 was_err_reg = 0; + + switch (was_err_group) { + case 0: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; + break; + case 1: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; + break; + case 2: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; + break; + case 3: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; + break; + } + REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); +} + +static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i; + u32 val; + + /* Set VF masks and configuration - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); + + val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); + val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); + if (vf->cfg_flags & VF_CFG_INT_SIMD) + val |= IGU_VF_CONF_SINGLE_ISR_EN; + val &= ~IGU_VF_CONF_PARENT_MASK; + val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */ + REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); + + DP(BNX2X_MSG_IOV, + "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n", + vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION)); + + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* iterate over all queues, clear sb consumer */ + for (i = 0; i < vf_sb_count(vf); i++) { + u8 igu_sb_id = vf_igu_sb(vf, i); + + /* zero prod memory */ + REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); + + /* clear sb state machine */ + bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, + false /* VF */); + + /* disable + update */ + bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, + IGU_INT_DISABLE, 1); + } +} + +void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) +{ + /* set the VF-PF association in the FW */ + storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); + storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); + + /* clear vf errors*/ + bnx2x_vf_semi_clear_err(bp, abs_vfid); + bnx2x_vf_pglue_clear_err(bp, abs_vfid); + + /* internal vf-enable - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); + DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); + bnx2x_vf_enable_internal(bp, true); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + /* Reset vf in IGU interrupts are still disabled */ + bnx2x_vf_igu_reset(bp, vf); + + /* pretend to enable the vf with the PBF */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + REG_WR(bp, PBF_REG_DISABLE_VF, 0); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) +{ + struct pci_dev *dev; + struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + + if (!vf) + goto unknown_dev; + + dev = pci_get_bus_and_slot(vf->bus, vf->devfn); + if (dev) + return bnx2x_is_pcie_pending(dev); + +unknown_dev: + BNX2X_ERR("Unknown device\n"); + return false; 
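
bnx2x_vf_is_pcie_pending() above resolves the VF's bus/devfn to a pci_dev and defers to bnx2x_is_pcie_pending(), which is defined elsewhere in the driver and not shown in this hunk. As a hedged illustration of what such a check typically amounts to on Linux, the sketch below reads the Transactions Pending bit of the PCIe Device Status register; the function name is made up for illustration and is not the driver's helper.

/* Illustrative only -- not the driver's bnx2x_is_pcie_pending(). */
#include <linux/pci.h>

static bool example_pcie_transactions_pending(struct pci_dev *dev)
{
	u16 status;

	/* pcie_capability_read_word() returns non-zero on failure */
	if (pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status))
		return false;

	return !!(status & PCI_EXP_DEVSTA_TRPND);
}
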
+} + +int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) +{ + /* Wait 100ms */ + msleep(100); + + /* Verify no pending pci transactions */ + if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) + BNX2X_ERR("PCIE Transactions still pending\n"); + + return 0; +} + +/* must be called after the number of PF queues and the number of VFs are + * both known + */ +static void +bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) +{ + u16 vlan_count = 0; + + /* will be set only during VF-ACQUIRE */ + resc->num_rxqs = 0; + resc->num_txqs = 0; + + /* no credit calculcis for macs (just yet) */ + resc->num_mac_filters = 1; + + /* divvy up vlan rules */ + vlan_count = bp->vlans_pool.check(&bp->vlans_pool); + vlan_count = 1 << ilog2(vlan_count); + resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); + + /* no real limitation */ + resc->num_mc_filters = 0; + + /* num_sbs already set */ +} + +/* FLR routines: */ +static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + /* reset the state variables */ + bnx2x_iov_static_resc(bp, &vf->alloc_resc); + vf->state = VF_FREE; +} + +static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); + + /* DQ usage counter */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, + "DQ VF usage counter timed out", + poll_cnt); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* FW cleanup command - poll for the results */ + if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), + poll_cnt)) + BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid); + + /* verify TX hw is flushed */ + bnx2x_tx_hw_flushed(bp, poll_cnt); +} + +static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; + enum bnx2x_vfop_flr_state state = vfop->state; + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vfop_flr, + .block = false, + }; + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_FLR_QUEUES: + /* the cleanup operations are valid if and only if the VF + * was first acquired. 
+ */ + if (++(qx->qid) < vf_rxq_count(vf)) { + vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd, + qx->qid); + if (vfop->rc) + goto op_err; + return; + } + /* remove multicasts */ + vfop->state = BNX2X_VFOP_FLR_HW; + vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, + 0, true); + if (vfop->rc) + goto op_err; + return; + case BNX2X_VFOP_FLR_HW: + + /* dispatch final cleanup and wait for HW queues to flush */ + bnx2x_vf_flr_clnup_hw(bp, vf); + + /* release VF resources */ + bnx2x_vf_free_resc(bp, vf); + + /* re-open the mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + + goto op_done; + default: + bnx2x_vfop_default(state); + } +op_err: + BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc); +op_done: + vf->flr_clnup_stage = VF_FLR_ACK; + bnx2x_vfop_end(bp, vf, vfop); + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); +} + +static int bnx2x_vfop_flr_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + vfop_handler_t done) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + if (vfop) { + vfop->args.qx.qid = -1; /* loop */ + bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES, + bnx2x_vfop_flr, done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false); + } + return -ENOMEM; +} + +static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf) +{ + int i = prev_vf ? prev_vf->index + 1 : 0; + struct bnx2x_virtf *vf; + + /* find next VF to cleanup */ +next_vf_to_clean: + for (; + i < BNX2X_NR_VIRTFN(bp) && + (bnx2x_vf(bp, i, state) != VF_RESET || + bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN); + i++) + ; + + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i, + BNX2X_NR_VIRTFN(bp)); + + if (i < BNX2X_NR_VIRTFN(bp)) { + vf = BP_VF(bp, i); + + /* lock the vf pf channel */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); + + /* invoke the VF FLR SM */ + if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) { + BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n", + vf->abs_vfid); + + /* mark the VF to be ACKED and continue */ + vf->flr_clnup_stage = VF_FLR_ACK; + goto next_vf_to_clean; + } + return; + } + + /* we are done, update vf records */ + for_each_vf(bp, i) { + vf = BP_VF(bp, i); + + if (vf->flr_clnup_stage != VF_FLR_ACK) + continue; + + vf->flr_clnup_stage = VF_FLR_EPILOG; + } + + /* Acknowledge the handled VFs. + * we are acknowledge all the vfs which an flr was requested for, even + * if amongst them there are such that we never opened, since the mcp + * will interrupt us immediately again if we only ack some of the bits, + * resulting in an endless loop. 
This can happen for example in KVM + * where an 'all ones' flr request is sometimes given by hyper visor + */ + DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n", + bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); + for (i = 0; i < FLRD_VFS_DWORDS; i++) + SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], + bp->vfdb->flrd_vfs[i]); + + bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); + + /* clear the acked bits - better yet if the MCP implemented + * write to clear semantics + */ + for (i = 0; i < FLRD_VFS_DWORDS; i++) + SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); +} + +void bnx2x_vf_handle_flr_event(struct bnx2x *bp) +{ + int i; + + /* Read FLR'd VFs */ + for (i = 0; i < FLRD_VFS_DWORDS; i++) + bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); + + DP(BNX2X_MSG_MCP, + "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n", + bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); + + for_each_vf(bp, i) { + struct bnx2x_virtf *vf = BP_VF(bp, i); + u32 reset = 0; + + if (vf->abs_vfid < 32) + reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); + else + reset = bp->vfdb->flrd_vfs[1] & + (1 << (vf->abs_vfid - 32)); + + if (reset) { + /* set as reset and ready for cleanup */ + vf->state = VF_RESET; + vf->flr_clnup_stage = VF_FLR_CLN; + + DP(BNX2X_MSG_IOV, + "Initiating Final cleanup for VF %d\n", + vf->abs_vfid); + } + } + + /* do the FLR cleanup for all marked VFs*/ + bnx2x_vf_flr_clnup(bp, NULL); +} + +/* IOV global initialization routines */ +void bnx2x_iov_init_dq(struct bnx2x *bp) +{ + if (!IS_SRIOV(bp)) + return; + + /* Set the DQ such that the CID reflect the abs_vfid */ + REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); + REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); + + /* Set VFs starting CID. If its > 0 the preceding CIDs are belong to + * the PF L2 queues + */ + REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); + + /* The VF window size is the log2 of the max number of CIDs per VF */ + REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); + + /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match + * the Pf doorbell size although the 2 are independent. + */ + REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, + BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); + + /* No security checks for now - + * configure single rule (out of 16) mask = 0x1, value = 0x0, + * CID range 0 - 0x1ffff + */ + REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); + REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); + REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); + REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); + + /* set the number of VF alllowed doorbells to the full DQ range */ + REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); + + /* set the VF doorbell threshold */ + REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4); +} + +void bnx2x_iov_init_dmae(struct bnx2x *bp) +{ + DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? 
"ON" : "OFF"); + if (!IS_SRIOV(bp)) + return; + + REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); +} + +static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) +{ + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + return dev->bus->number + ((dev->devfn + iov->offset + + iov->stride * vfid) >> 8); +} + +static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) +{ + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; +} + +static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i, n; + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { + u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); + u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); + + size /= iov->total; + vf->bars[n].bar = start + size * vf->abs_vfid; + vf->bars[n].size = size; + } +} + +static int bnx2x_ari_enabled(struct pci_dev *dev) +{ + return dev->bus->self && dev->bus->self->ari_enabled; +} + +static void +bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) +{ + int sb_id; + u32 val; + u8 fid; + + /* IGU in normal mode - read CAM */ + for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { + val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) + continue; + fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); + if (!(fid & IGU_FID_ENCODE_IS_PF)) + bnx2x_vf_set_igu_info(bp, sb_id, + (fid & IGU_FID_VF_NUM_MASK)); + + DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", + ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), + ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) : + (fid & IGU_FID_VF_NUM_MASK)), sb_id, + GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); + } +} + +static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) +{ + if (bp->vfdb) { + kfree(bp->vfdb->vfqs); + kfree(bp->vfdb->vfs); + kfree(bp->vfdb); + } + bp->vfdb = NULL; +} + +static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) +{ + int pos; + struct pci_dev *dev = bp->pdev; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + BNX2X_ERR("failed to find SRIOV capability in device\n"); + return -ENODEV; + } + + iov->pos = pos; + DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); + pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); + pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); + pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + return 0; +} + +static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) +{ + u32 val; + + /* read the SRIOV capability structure + * The fields can be read via configuration read or + * directly from the device (starting at offset PCICFG_OFFSET) + */ + if (bnx2x_sriov_pci_cfg_info(bp, iov)) + return -ENODEV; + + /* get the number of SRIOV bars */ + iov->nres = 0; + + /* read the first_vfid */ + val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); + iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) + * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); + + DP(BNX2X_MSG_IOV, + "IOV info[%d]: 
first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", + BP_FUNC(bp), + iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, + iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); + + return 0; +} + +static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp) +{ + int i; + u8 queue_count = 0; + + if (IS_SRIOV(bp)) + for_each_vf(bp, i) + queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs); + + return queue_count; +} + +/* must be called after PF bars are mapped */ +int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, + int num_vfs_param) +{ + int err, i, qcount; + struct bnx2x_sriov *iov; + struct pci_dev *dev = bp->pdev; + + bp->vfdb = NULL; + + /* verify is pf */ + if (IS_VF(bp)) + return 0; + + /* verify sriov capability is present in configuration space */ + if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + /* verify chip revision */ + if (CHIP_IS_E1x(bp)) + return 0; + + /* check if SRIOV support is turned off */ + if (!num_vfs_param) + return 0; + + /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ + if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { + BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n", + BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); + return 0; + } + + /* SRIOV can be enabled only with MSIX */ + if (int_mode_param == BNX2X_INT_MODE_MSI || + int_mode_param == BNX2X_INT_MODE_INTX) + BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); + + err = -EIO; + /* verify ari is enabled */ + if (!bnx2x_ari_enabled(bp->pdev)) { + BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n"); + return err; + } + + /* verify igu is in normal mode */ + if (CHIP_INT_MODE_IS_BC(bp)) { + BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); + return err; + } + + /* allocate the vfs database */ + bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); + if (!bp->vfdb) { + BNX2X_ERR("failed to allocate vf database\n"); + err = -ENOMEM; + goto failed; + } + + /* get the sriov info - Linux already collected all the pertinent + * information, however the sriov structure is for the private use + * of the pci module. Also we want this information regardless + * of the hyper-visor. 
+ */ + iov = &(bp->vfdb->sriov); + err = bnx2x_sriov_info(bp, iov); + if (err) + goto failed; + + /* SR-IOV capability was enabled but there are no VFs*/ + if (iov->total == 0) + goto failed; + + /* calculate the actual number of VFs */ + iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param); + + /* allocate the vf array */ + bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * + BNX2X_NR_VIRTFN(bp), GFP_KERNEL); + if (!bp->vfdb->vfs) { + BNX2X_ERR("failed to allocate vf array\n"); + err = -ENOMEM; + goto failed; + } + + /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ + for_each_vf(bp, i) { + bnx2x_vf(bp, i, index) = i; + bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; + bnx2x_vf(bp, i, state) = VF_FREE; + INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); + mutex_init(&bnx2x_vf(bp, i, op_mutex)); + bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; + } + + /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ + bnx2x_get_vf_igu_cam_info(bp); + + /* get the total queue count and allocate the global queue arrays */ + qcount = bnx2x_iov_get_max_queue_count(bp); + + /* allocate the queue arrays for all VFs */ + bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), + GFP_KERNEL); + if (!bp->vfdb->vfqs) { + BNX2X_ERR("failed to allocate vf queue array\n"); + err = -ENOMEM; + goto failed; + } + + return 0; +failed: + DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); + __bnx2x_iov_free_vfdb(bp); + return err; +} + +void bnx2x_iov_remove_one(struct bnx2x *bp) +{ + /* if SRIOV is not enabled there's nothing to do */ + if (!IS_SRIOV(bp)) + return; + + DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); + pci_disable_sriov(bp->pdev); + DP(BNX2X_MSG_IOV, "sriov disabled\n"); + + /* free vf database */ + __bnx2x_iov_free_vfdb(bp); +} + +void bnx2x_iov_free_mem(struct bnx2x *bp) +{ + int i; + + if (!IS_SRIOV(bp)) + return; + + /* free vfs hw contexts */ + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *cxt = &bp->vfdb->context[i]; + BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); + } + + BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, + BP_VFDB(bp)->sp_dma.mapping, + BP_VFDB(bp)->sp_dma.size); + + BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, + BP_VF_MBX_DMA(bp)->mapping, + BP_VF_MBX_DMA(bp)->size); + + BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, + BP_VF_BULLETIN_DMA(bp)->mapping, + BP_VF_BULLETIN_DMA(bp)->size); +} + +int bnx2x_iov_alloc_mem(struct bnx2x *bp) +{ + size_t tot_size; + int i, rc = 0; + + if (!IS_SRIOV(bp)) + return rc; + + /* allocate vfs hw contexts */ + tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * + BNX2X_CIDS_PER_VF * sizeof(union cdu_context); + + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); + cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); + + if (cxt->size) { + BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); + } else { + cxt->addr = NULL; + cxt->mapping = 0; + } + tot_size -= cxt->size; + } + + /* allocate vfs ramrods dma memory - client_init and set_mac */ + tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); + BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, + tot_size); + BP_VFDB(bp)->sp_dma.size = tot_size; + + /* allocate mailboxes */ + tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; + BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, + tot_size); + BP_VF_MBX_DMA(bp)->size = tot_size; + + /* allocate local bulletin boards */ + tot_size = BNX2X_NR_VIRTFN(bp) * 
BULLETIN_CONTENT_SIZE; + BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, + &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); + BP_VF_BULLETIN_DMA(bp)->size = tot_size; + + return 0; + +alloc_mem_err: + return -ENOMEM; +} + +static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + unsigned long q_type = 0; + + set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + + /* Queue State object */ + bnx2x_init_queue_obj(bp, &q->sp_obj, + cl_id, &q->cid, 1, func_id, + bnx2x_vf_sp(bp, vf, q_data), + bnx2x_vf_sp_map(bp, vf, q_data), + q_type); + + DP(BNX2X_MSG_IOV, + "initialized vf %d's queue object. func id set to %d\n", + vf->abs_vfid, q->sp_obj.func_id); + + /* mac/vlan objects are per queue, but only those + * that belong to the leading queue are initialized + */ + if (vfq_is_leading(q)) { + /* mac */ + bnx2x_init_mac_obj(bp, &q->mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, mac_rdata), + bnx2x_vf_sp_map(bp, vf, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->macs_pool); + /* vlan */ + bnx2x_init_vlan_obj(bp, &q->vlan_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_rdata), + bnx2x_vf_sp_map(bp, vf, vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->vlans_pool); + + /* mcast */ + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, + q->cid, func_id, func_id, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + vf->leading_rss = cl_id; + } +} + +/* called by bnx2x_nic_load */ +int bnx2x_iov_nic_init(struct bnx2x *bp) +{ + int vfid, qcount, i; + + if (!IS_SRIOV(bp)) { + DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); + return 0; + } + + DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); + + /* initialize vf database */ + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); + + int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * + BNX2X_CIDS_PER_VF; + + union cdu_context *base_cxt = (union cdu_context *) + BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + + (base_vf_cid & (ILT_PAGE_CIDS-1)); + + DP(BNX2X_MSG_IOV, + "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", + vf->abs_vfid, vf_sb_count(vf), base_vf_cid, + BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); + + /* init statically provisioned resources */ + bnx2x_iov_static_resc(bp, &vf->alloc_resc); + + /* queues are initialized during VF-ACQUIRE */ + + /* reserve the vf vlan credit */ + bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); + + vf->filter_state = 0; + vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); + + /* init mcast object - This object will be re-initialized + * during VF-ACQUIRE with the proper cl_id and cid. + * It needs to be initialized here so that it can be safely + * handled by a subsequent FLR flow. 
+ */ + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, + 0xFF, 0xFF, 0xFF, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* set the mailbox message addresses */ + BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) + (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * + MBX_MSG_ALIGNED_SIZE); + + BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + + vfid * MBX_MSG_ALIGNED_SIZE; + + /* Enable vf mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + } + + /* Final VF init */ + qcount = 0; + for_each_vf(bp, i) { + struct bnx2x_virtf *vf = BP_VF(bp, i); + + /* fill in the BDF and bars */ + vf->bus = bnx2x_vf_bus(bp, i); + vf->devfn = bnx2x_vf_devfn(bp, i); + bnx2x_vf_set_bars(bp, vf); + + DP(BNX2X_MSG_IOV, + "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", + vf->abs_vfid, vf->bus, vf->devfn, + (unsigned)vf->bars[0].bar, vf->bars[0].size, + (unsigned)vf->bars[1].bar, vf->bars[1].size, + (unsigned)vf->bars[2].bar, vf->bars[2].size); + + /* set local queue arrays */ + vf->vfqs = &bp->vfdb->vfqs[qcount]; + qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); + } + + return 0; +} + +/* called by bnx2x_chip_cleanup */ +int bnx2x_iov_chip_cleanup(struct bnx2x *bp) +{ + int i; + + if (!IS_SRIOV(bp)) + return 0; + + /* release all the VFs */ + for_each_vf(bp, i) + bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ + + return 0; +} + +/* called by bnx2x_init_hw_func, returns the next ilt line */ +int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) +{ + int i; + struct bnx2x_ilt *ilt = BP_ILT(bp); + + if (!IS_SRIOV(bp)) + return line; + + /* set vfs ilt lines */ + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); + + ilt->lines[line+i].page = hw_cxt->addr; + ilt->lines[line+i].page_mapping = hw_cxt->mapping; + ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ + } + return line + i; +} + +static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) +{ + return ((cid >= BNX2X_FIRST_VF_CID) && + ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); +} + +static +void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, + struct bnx2x_vf_queue *vfq, + union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + + /* Always push next commands out, don't wait here */ + set_bit(RAMROD_CONT, &ramrod_flags); + + switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + case BNX2X_FILTER_MAC_PENDING: + rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, + &ramrod_flags); + break; + case BNX2X_FILTER_VLAN_PENDING: + rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, + &ramrod_flags); + break; + default: + BNX2X_ERR("Unsupported classification command: %d\n", + elem->message.data.eth_event.echo); + return; + } + if (rc < 0) + BNX2X_ERR("Failed to schedule new commands: %d\n", rc); + else if (rc > 0) + DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); +} + +static +void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + int rc; + + rparam.mcast_obj = &vf->mcast_obj; + vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); + + /* If there are pending mcast commands - send them */ + if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + if (rc < 0) + BNX2X_ERR("Failed to send pending mcast commands: %d\n", + rc); + } +} + +static +void 
bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + smp_mb__before_clear_bit(); + clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + smp_mb__after_clear_bit(); +} + +int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) +{ + struct bnx2x_virtf *vf; + int qidx = 0, abs_vfid; + u8 opcode; + u16 cid = 0xffff; + + if (!IS_SRIOV(bp)) + return 1; + + /* first get the cid - the only events we handle here are cfc-delete + * and set-mac completion + */ + opcode = elem->message.opcode; + + switch (opcode) { + case EVENT_RING_OPCODE_CFC_DEL: + cid = SW_CID((__force __le32) + elem->message.data.cfc_del_event.cid); + DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); + break; + case EVENT_RING_OPCODE_CLASSIFICATION_RULES: + case EVENT_RING_OPCODE_MULTICAST_RULES: + case EVENT_RING_OPCODE_FILTERS_RULES: + cid = (elem->message.data.eth_event.echo & + BNX2X_SWCID_MASK); + DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); + break; + case EVENT_RING_OPCODE_VF_FLR: + abs_vfid = elem->message.data.vf_flr_event.vf_id; + DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", + abs_vfid); + goto get_vf; + case EVENT_RING_OPCODE_MALICIOUS_VF: + abs_vfid = elem->message.data.malicious_vf_event.vf_id; + DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n", + abs_vfid); + goto get_vf; + default: + return 1; + } + + /* check if the cid is the VF range */ + if (!bnx2x_iov_is_vf_cid(bp, cid)) { + DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); + return 1; + } + + /* extract vf and rxq index from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. the max number of VFs (per path) is 64 + */ + qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); + abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); +get_vf: + vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + + if (!vf) { + BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", + cid, abs_vfid); + return 0; + } + + switch (opcode) { + case EVENT_RING_OPCODE_CFC_DEL: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", + vf->abs_vfid, qidx); + vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, + &vfq_get(vf, + qidx)->sp_obj, + BNX2X_Q_CMD_CFC_DEL); + break; + case EVENT_RING_OPCODE_CLASSIFICATION_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); + break; + case EVENT_RING_OPCODE_MULTICAST_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_mcast_eqe(bp, vf); + break; + case EVENT_RING_OPCODE_FILTERS_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_filters_eqe(bp, vf); + break; + case EVENT_RING_OPCODE_VF_FLR: + DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", + vf->abs_vfid); + /* Do nothing for now */ + break; + case EVENT_RING_OPCODE_MALICIOUS_VF: + DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n", + vf->abs_vfid); + /* Do nothing for now */ + break; + } + /* SRIOV: reschedule any 'in_progress' operations */ + bnx2x_iov_sp_event(bp, cid, false); + + return 0; +} + +static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) +{ + /* extract the vf from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. 
the max number of VFs (per path) is 64 + */ + int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); + return bnx2x_vf_by_abs_fid(bp, abs_vfid); +} + +void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj) +{ + struct bnx2x_virtf *vf; + + if (!IS_SRIOV(bp)) + return; + + vf = bnx2x_vf_by_cid(bp, vf_cid); + + if (vf) { + /* extract queue index from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. the max number of VFs (per path) is 64 + */ + int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); + *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); + } else { + BNX2X_ERR("No vf matching cid %d\n", vf_cid); + } +} + +void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) +{ + struct bnx2x_virtf *vf; + + /* check if the cid is the VF range */ + if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) + return; + + vf = bnx2x_vf_by_cid(bp, vf_cid); + if (vf) { + /* set in_progress flag */ + atomic_set(&vf->op_in_progress, 1); + if (queue_work) + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + } +} + +void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) +{ + int i; + int first_queue_query_index, num_queues_req; + dma_addr_t cur_data_offset; + struct stats_query_entry *cur_query_entry; + u8 stats_count = 0; + bool is_fcoe = false; + + if (!IS_SRIOV(bp)) + return; + + if (!NO_FCOE(bp)) + is_fcoe = true; + + /* fcoe adds one global request and one queue request */ + num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; + first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - + (is_fcoe ? 0 : 1); + + DP(BNX2X_MSG_IOV, + "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", + BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, + first_queue_query_index + num_queues_req); + + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats) + + num_queues_req * sizeof(struct per_queue_stats); + + cur_query_entry = &bp->fw_stats_req-> + query[first_queue_query_index + num_queues_req]; + + for_each_vf(bp, i) { + int j; + struct bnx2x_virtf *vf = BP_VF(bp, i); + + if (vf->state != VF_ENABLED) { + DP(BNX2X_MSG_IOV, + "vf %d not enabled so no stats for it\n", + vf->abs_vfid); + continue; + } + + DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); + for_each_vfq(vf, j) { + struct bnx2x_vf_queue *rxq = vfq_get(vf, j); + + /* collect stats for active queues only */ + if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == + BNX2X_Q_LOGICAL_STATE_STOPPED) + continue; + + /* create stats query entry for this queue */ + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = vfq_cl_id(vf, rxq); + cur_query_entry->funcID = + cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(vf->fw_stat_map)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(vf->fw_stat_map)); + DP(BNX2X_MSG_IOV, + "added address %x %x for vf %d queue %d client %d\n", + cur_query_entry->address.hi, + cur_query_entry->address.lo, cur_query_entry->funcID, + j, cur_query_entry->index); + cur_query_entry++; + cur_data_offset += sizeof(struct per_queue_stats); + stats_count++; + } + } + bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; +} + +void bnx2x_iov_sp_task(struct bnx2x *bp) +{ + int i; + + if (!IS_SRIOV(bp)) + return; + /* Iterate over all VFs and invoke state transition for VFs with + * 'in-progress' slow-path 
operations + */ + DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); + for_each_vf(bp, i) { + struct bnx2x_virtf *vf = BP_VF(bp, i); + + if (!list_empty(&vf->op_list_head) && + atomic_read(&vf->op_in_progress)) { + DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); + bnx2x_vfop_cur(bp, vf)->transition(bp, vf); + } + } +} + +static inline +struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) +{ + int i; + struct bnx2x_virtf *vf = NULL; + + for_each_vf(bp, i) { + vf = BP_VF(bp, i); + if (stat_id >= vf->igu_base_id && + stat_id < vf->igu_base_id + vf_sb_count(vf)) + break; + } + return vf; +} + +/* VF API helpers */ +static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, + u8 enable) +{ + u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; + u32 val = enable ? (abs_vfid | (1 << 6)) : 0; + + REG_WR(bp, reg, val); +} + +static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i; + + for_each_vfq(vf, i) + bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, + vfq_qzone_id(vf, vfq_get(vf, i)), false); +} + +static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + u32 val; + + /* clear the VF configuration - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); + val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | + IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); + REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), + BNX2X_VF_MAX_QUEUES); +} + +static +int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vf_pf_resc_request *req_resc) +{ + u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); + u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); + + return ((req_resc->num_rxqs <= rxq_cnt) && + (req_resc->num_txqs <= txq_cnt) && + (req_resc->num_sbs <= vf_sb_count(vf)) && + (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && + (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); +} + +/* CORE VF API */ +int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vf_pf_resc_request *resc) +{ + int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * + BNX2X_CIDS_PER_VF; + + union cdu_context *base_cxt = (union cdu_context *) + BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + + (base_vf_cid & (ILT_PAGE_CIDS-1)); + int i; + + /* if state is 'acquired' the VF was not released or FLR'd, in + * this case the returned resources match the already + * acquired resources. Verify that the requested numbers do + * not exceed the already acquired numbers. + */ + if (vf->state == VF_ACQUIRED) { + DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", + vf->abs_vfid); + + if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { + BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n", + vf->abs_vfid); + return -EINVAL; + } + return 0; + } + + /* Otherwise vf state must be 'free' or 'reset' */ + if (vf->state != VF_FREE && vf->state != VF_RESET) { + BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", + vf->abs_vfid, vf->state); + return -EINVAL; + } + + /* static allocation: + * the global maximum numbers are fixed per VF. 
Fail the request if the + requested numbers exceed these globals + */ + if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { + DP(BNX2X_MSG_IOV, + "cannot fulfill vf resource request. Placing maximal available values in response\n"); + /* set the max resource in the vf */ + return -ENOMEM; + } + + /* Set resources counters - 0 request means max available */ + vf_sb_count(vf) = resc->num_sbs; + vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); + vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); + if (resc->num_mac_filters) + vf_mac_rules_cnt(vf) = resc->num_mac_filters; + if (resc->num_vlan_filters) + vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; + + DP(BNX2X_MSG_IOV, + "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", + vf_sb_count(vf), vf_rxq_count(vf), + vf_txq_count(vf), vf_mac_rules_cnt(vf), + vf_vlan_rules_cnt(vf)); + + /* Initialize the queues */ + if (!vf->vfqs) { + DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); + return -EINVAL; + } + + for_each_vfq(vf, i) { + struct bnx2x_vf_queue *q = vfq_get(vf, i); + + if (!q) { + DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); + return -EINVAL; + } + + q->index = i; + q->cxt = &((base_cxt + i)->eth); + q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; + + DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", + vf->abs_vfid, i, q->index, q->cid, q->cxt); + + /* init SP objects */ + bnx2x_vfq_init(bp, vf, q); + } + vf->state = VF_ACQUIRED; + return 0; +} + +int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) +{ + struct bnx2x_func_init_params func_init = {0}; + u16 flags = 0; + int i; + + /* the sb resources are initialized at this point, do the + * FW/HW initializations + */ + for_each_vf_sb(vf, i) + bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, + vf_igu_sb(vf, i), vf_igu_sb(vf, i)); + + /* Sanity checks */ + if (vf->state != VF_ACQUIRED) { + DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", + vf->abs_vfid, vf->state); + return -EINVAL; + } + /* FLR cleanup epilogue */ + if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) + return -EBUSY; + + /* reset IGU VF statistics: MSIX */ + REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0); + + /* vf init */ + if (vf->cfg_flags & VF_CFG_STATS) + flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); + + if (vf->cfg_flags & VF_CFG_TPA) + flags |= FUNC_FLG_TPA; + + if (is_vf_multi(vf)) + flags |= FUNC_FLG_RSS; + + /* function setup */ + func_init.func_flgs = flags; + func_init.pf_id = BP_FUNC(bp); + func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); + func_init.fw_stat_map = vf->fw_stat_map; + func_init.spq_map = vf->spq_map; + func_init.spq_prod = 0; + bnx2x_func_init(bp, &func_init); + + /* Enable the vf */ + bnx2x_vf_enable_access(bp, vf->abs_vfid); + bnx2x_vf_enable_traffic(bp, vf); + + /* queue protection table */ + for_each_vfq(vf, i) + bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, + vfq_qzone_id(vf, vfq_get(vf, i)), true); + + vf->state = VF_ENABLED; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf->index); + + return 0; +} + +/* VFOP close (teardown the queues, delete mcasts and close HW) */ +static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; + enum bnx2x_vfop_close_state state = vfop->state; + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vfop_close, + .block = false, + }; + + if (vfop->rc < 0) + goto 
op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_CLOSE_QUEUES: + + if (++(qx->qid) < vf_rxq_count(vf)) { + vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); + if (vfop->rc) + goto op_err; + return; + } + + /* remove multicasts */ + vfop->state = BNX2X_VFOP_CLOSE_HW; + vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); + if (vfop->rc) + goto op_err; + return; + + case BNX2X_VFOP_CLOSE_HW: + + /* disable the interrupts */ + DP(BNX2X_MSG_IOV, "disabling igu\n"); + bnx2x_vf_igu_disable(bp, vf); + + /* disable the VF */ + DP(BNX2X_MSG_IOV, "clearing qtbl\n"); + bnx2x_vf_clr_qtbl(bp, vf); + + goto op_done; + default: + bnx2x_vfop_default(state); + } +op_err: + BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); +op_done: + vf->state = VF_ACQUIRED; + DP(BNX2X_MSG_IOV, "set state to acquired\n"); + bnx2x_vfop_end(bp, vf, vfop); +} + +int bnx2x_vfop_close_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + if (vfop) { + vfop->args.qx.qid = -1; /* loop */ + bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, + bnx2x_vfop_close, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, + cmd->block); + } + return -ENOMEM; +} + +/* VF release can be called either: 1. the VF was acquired but + * not enabled 2. the vf was enabled or in the process of being + * enabled + */ +static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vfop_release, + .block = false, + }; + + DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, + vf->state == VF_FREE ? "Free" : + vf->state == VF_ACQUIRED ? "Acquired" : + vf->state == VF_ENABLED ? "Enabled" : + vf->state == VF_RESET ? "Reset" : + "Unknown"); + + switch (vf->state) { + case VF_ENABLED: + vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); + if (vfop->rc) + goto op_err; + return; + + case VF_ACQUIRED: + DP(BNX2X_MSG_IOV, "about to free resources\n"); + bnx2x_vf_free_resc(bp, vf); + DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); + goto op_done; + + case VF_FREE: + case VF_RESET: + /* do nothing */ + goto op_done; + default: + bnx2x_vfop_default(vf->state); + } +op_err: + BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); +op_done: + bnx2x_vfop_end(bp, vf, vfop); +} + +int bnx2x_vfop_release_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + if (vfop) { + bnx2x_vfop_opset(-1, /* use vf->state */ + bnx2x_vfop_release, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, + cmd->block); + } + return -ENOMEM; +} + +/* VF release ~ VF close + VF release-resources + * Release is the ultimate SW shutdown and is called whenever an + * irrecoverable error is encountered. 
+ */ +void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) +{ + struct bnx2x_vfop_cmd cmd = { + .done = NULL, + .block = block, + }; + int rc; + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); + + rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); + if (rc) + WARN(rc, + "VF[%d] Failed to allocate resources for release op - rc=%d\n", + vf->abs_vfid, rc); +} + +static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, + struct bnx2x_virtf *vf, u32 *sbdf) +{ + *sbdf = vf->devfn | (vf->bus << 8); +} + +static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_bar_info *bar_info) +{ + int n; + + bar_info->nr_bars = bp->vfdb->sriov.nres; + for (n = 0; n < bar_info->nr_bars; n++) + bar_info->bars[n] = vf->bars[n]; +} + +void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs tlv) +{ + /* lock the channel */ + mutex_lock(&vf->op_mutex); + + /* record the locking op */ + vf->op_current = tlv; + + /* log the lock */ + DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", + vf->abs_vfid, tlv); +} + +void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs expected_tlv) +{ + WARN(expected_tlv != vf->op_current, + "lock mismatch: expected %d found %d", expected_tlv, + vf->op_current); + + /* unlock the channel */ + mutex_unlock(&vf->op_mutex); + + /* log the unlock */ + DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", + vf->abs_vfid, vf->op_current); + + /* clear the locking op */ + vf->op_current = CHANNEL_TLV_NONE; +} + +void bnx2x_enable_sriov(struct bnx2x *bp) +{ + int rc = 0; + + /* disable sriov in case it is still enabled */ + pci_disable_sriov(bp->pdev); + DP(BNX2X_MSG_IOV, "sriov disabled\n"); + + /* enable sriov */ + DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn)); + rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn)); + if (rc) + BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); + else + DP(BNX2X_MSG_IOV, "sriov enabled\n"); +} + +/* New mac for VF. Consider these cases: + * 1. VF hasn't been acquired yet - save the mac in local bulletin board and + * supply at acquire. + * 2. VF has already been acquired but has not yet initialized - store in local + * bulletin board. mac will be posted on VF bulletin board after VF init. VF + * will configure this mac when it is ready. + * 3. VF has already initialized but has not yet set up a queue - post the new + * mac on VF's bulletin board right now. VF will configure this mac when it + * is ready. + * 4. VF has already set a queue - delete any macs already configured for this + * queue and manually configure the new mac. + * In any event, once this function has been called, refuse any attempts by the + * VF to configure any mac for itself except for this mac. In case of a race + * where the VF fails to see the new post on its bulletin board before sending a + * mac configuration request, the PF will simply fail the request and the VF can + * try again after consulting its bulletin board. + */ +int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc, q_logical_state, vfidx = queue; + struct bnx2x_virtf *vf = BP_VF(bp, vfidx); + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); + + /* if SRIOV is disabled there is nothing to do (and somewhere, someone + * has erred). 
+ */ + if (!IS_SRIOV(bp)) { + BNX2X_ERR("bnx2x_set_vf_mac called even though sriov is disabled\n"); + return -EINVAL; + } + + if (!is_valid_ether_addr(mac)) { + BNX2X_ERR("mac address invalid\n"); + return -EINVAL; + } + + /* update PF's copy of the VF's bulletin. The PF will no longer accept + * mac configuration requests from the vf unless they match this mac + */ + bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; + memcpy(bulletin->mac, mac, ETH_ALEN); + + /* Post update on VF's bulletin board */ + rc = bnx2x_post_vf_bulletin(bp, vfidx); + if (rc) { + BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); + return rc; + } + + /* is vf initialized and queue set up? */ + q_logical_state = + bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + if (vf->state == VF_ENABLED && + q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { + /* configure the mac in device on this vf's queue */ + unsigned long flags = 0; + struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); + + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); + + /* remove existing eth macs */ + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); + if (rc) { + BNX2X_ERR("failed to delete eth macs\n"); + return -EINVAL; + } + + /* remove existing uc list macs */ + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); + if (rc) { + BNX2X_ERR("failed to delete uc_list macs\n"); + return -EINVAL; + } + + /* configure the new mac to device */ + __set_bit(RAMROD_COMP_WAIT, &flags); + bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, + BNX2X_ETH_MAC, &flags); + + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); + } + + return rc; +} + +/* crc is the first field in the bulletin board. compute the crc over the + * entire bulletin board excluding the crc field itself + */ +u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, + struct pf_vf_bulletin_content *bulletin) +{ + return crc32(BULLETIN_CRC_SEED, + ((u8 *)bulletin) + sizeof(bulletin->crc), + bulletin->length - sizeof(bulletin->crc)); +} + +/* Check for new posts on the bulletin board */ +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) +{ + struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; + int attempts; + + /* bulletin board hasn't changed since last sample */ + if (bp->old_bulletin.version == bulletin.version) + return PFVF_BULLETIN_UNCHANGED; + + /* validate crc of new bulletin board */ + if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { + /* sampling the structure mid-post may result in corrupted data; + * validate the crc to ensure coherency. + */ + for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { + bulletin = bp->pf2vf_bulletin->content; + if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, + &bulletin)) + break; + BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n", + bulletin.crc, + bnx2x_crc_vf_bulletin(bp, &bulletin)); + } + if (attempts >= BULLETIN_ATTEMPTS) { + BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. 
Aborting\n", + attempts); + return PFVF_BULLETIN_CRC_ERR; + } + } + + /* the mac address in bulletin board is valid and is new */ + if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && + memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) { + /* update new mac to net device */ + memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); + } + + /* copy new bulletin board to bp */ + bp->old_bulletin = bulletin; + + return PFVF_BULLETIN_UPDATED; +} + +void bnx2x_vf_map_doorbells(struct bnx2x *bp) +{ + /* vf doorbells are embedded within the regview */ + bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START; +} + +int bnx2x_vf_pci_alloc(struct bnx2x *bp) +{ + /* allocate vf2pf mailbox for vf to pf channel */ + BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + + /* allocate pf 2 vf bulletin board */ + BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); + + return 0; + +alloc_mem_err: + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); + return -ENOMEM; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h new file mode 100644 index 000000000000..b4050173add9 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -0,0 +1,809 @@ +/* bnx2x_sriov.h: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Shmulik Ravid <shmulikr@broadcom.com> + * Ariel Elior <ariele@broadcom.com> + */ +#ifndef BNX2X_SRIOV_H +#define BNX2X_SRIOV_H + +#include "bnx2x_vfpf.h" +#include "bnx2x.h" + +enum sample_bulletin_result { + PFVF_BULLETIN_UNCHANGED, + PFVF_BULLETIN_UPDATED, + PFVF_BULLETIN_CRC_ERR +}; + +#ifdef CONFIG_BNX2X_SRIOV + +/* The bnx2x device structure holds vfdb structure described below. + * The VF array is indexed by the relative vfid. 
+ */ +#define BNX2X_VF_MAX_QUEUES 16 +#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8 + +struct bnx2x_sriov { + u32 first_vf_in_pf; + + /* standard SRIOV capability fields, mostly for debugging */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total; /* total VFs associated with the PF */ + u16 initial; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ +}; + +/* bars */ +struct bnx2x_vf_bar { + u64 bar; + u32 size; +}; + +struct bnx2x_vf_bar_info { + struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; + u8 nr_bars; +}; + +/* vf queue (used both for rx or tx) */ +struct bnx2x_vf_queue { + struct eth_context *cxt; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* VLANs object */ + struct bnx2x_vlan_mac_obj vlan_obj; + atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + + /* Queue Slow-path State object */ + struct bnx2x_queue_sp_obj sp_obj; + + u32 cid; + u16 index; + u16 sb_idx; +}; + +/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: + * q-init, q-setup and SB index + */ +struct bnx2x_vfop_qctor_params { + struct bnx2x_queue_state_params qstate; + struct bnx2x_queue_setup_params prep_qsetup; +}; + +/* VFOP parameters (one copy per VF) */ +union bnx2x_vfop_params { + struct bnx2x_vlan_mac_ramrod_params vlan_mac; + struct bnx2x_rx_mode_ramrod_params rx_mode; + struct bnx2x_mcast_ramrod_params mcast; + struct bnx2x_config_rss_params rss; + struct bnx2x_vfop_qctor_params qctor; +}; + +/* forward */ +struct bnx2x_virtf; + +/* VFOP definitions */ +typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); + +struct bnx2x_vfop_cmd { + vfop_handler_t done; + bool block; +}; + +/* VFOP queue filters command additional arguments */ +struct bnx2x_vfop_filter { + struct list_head link; + int type; +#define BNX2X_VFOP_FILTER_MAC 1 +#define BNX2X_VFOP_FILTER_VLAN 2 + + bool add; + u8 *mac; + u16 vid; +}; + +struct bnx2x_vfop_filters { + int add_cnt; + struct list_head head; + struct bnx2x_vfop_filter filters[]; +}; + +/* transient list allocated, built and saved until its + * passed to the SP-VERBs layer. 
+ */ +struct bnx2x_vfop_args_mcast { + int mc_num; + struct bnx2x_mcast_list_elem *mc; +}; + +struct bnx2x_vfop_args_qctor { + int qid; + u16 sb_idx; +}; + +struct bnx2x_vfop_args_qdtor { + int qid; + struct eth_context *cxt; +}; + +struct bnx2x_vfop_args_defvlan { + int qid; + bool enable; + u16 vid; + u8 prio; +}; + +struct bnx2x_vfop_args_qx { + int qid; + bool en_add; +}; + +struct bnx2x_vfop_args_filters { + struct bnx2x_vfop_filters *multi_filter; + atomic_t *credit; /* non NULL means 'don't consume credit' */ +}; + +union bnx2x_vfop_args { + struct bnx2x_vfop_args_mcast mc_list; + struct bnx2x_vfop_args_qctor qctor; + struct bnx2x_vfop_args_qdtor qdtor; + struct bnx2x_vfop_args_defvlan defvlan; + struct bnx2x_vfop_args_qx qx; + struct bnx2x_vfop_args_filters filters; +}; + +struct bnx2x_vfop { + struct list_head link; + int rc; /* return code */ + int state; /* next state */ + union bnx2x_vfop_args args; /* extra arguments */ + union bnx2x_vfop_params *op_p; /* ramrod params */ + + /* state machine callbacks */ + vfop_handler_t transition; + vfop_handler_t done; +}; + +/* vf context */ +struct bnx2x_virtf { + u16 cfg_flags; +#define VF_CFG_STATS 0x0001 +#define VF_CFG_FW_FC 0x0002 +#define VF_CFG_TPA 0x0004 +#define VF_CFG_INT_SIMD 0x0008 +#define VF_CACHE_LINE 0x0010 + + u8 state; +#define VF_FREE 0 /* VF ready to be acquired, holds no resc */ +#define VF_ACQUIRED 1 /* VF acquired, but not initialized */ +#define VF_ENABLED 2 /* VF Enabled */ +#define VF_RESET 3 /* VF FLR'd, pending cleanup */ + + /* non 0 during flr cleanup */ + u8 flr_clnup_stage; +#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup' + * sans the end-wait + */ +#define VF_FLR_ACK 2 /* ACK flr notification */ +#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW + * ~ 'final cleanup' end-wait + */ + + /* dma */ + dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + dma_addr_t spq_map; + dma_addr_t bulletin_map; + + /* Allocated resources counters. Before the VF is acquired, the + * counters hold the following values: + * + * - xxq_count = 0 as the queues memory is not allocated yet. + * + * - sb_count = The number of status blocks configured for this VF in + * the IGU CAM. Initially read during probe. + * + * - xx_rules_count = The number of rules statically and equally + * allocated for each VF, during PF load. + */ + struct vf_pf_resc_request alloc_resc; +#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) +#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) +#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) +#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) +#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) +#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) + + u8 sb_count; /* actual number of SBs */ + u8 igu_base_id; /* base igu status block id */ + + struct bnx2x_vf_queue *vfqs; +#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) + + u8 index; /* index in the vf array */ + u8 abs_vfid; + u8 sp_cl_id; + u32 error; /* 0 means all's-well */ + + /* BDF */ + unsigned int bus; + unsigned int devfn; + + /* bars */ + struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; + + /* set-mac ramrod state 1-pending, 0-done */ + unsigned long filter_state; + + /* leading rss client id ~~ the client id of the first rxq, must be + * set for each txq. 
+ */ + int leading_rss; + + /* MCAST object */ + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* slow-path operations */ + atomic_t op_in_progress; + int op_rc; + bool op_wait_blocking; + struct list_head op_list_head; + union bnx2x_vfop_params op_params; + struct mutex op_mutex; /* one vfop at a time mutex */ + enum channel_tlvs op_current; +}; + +#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) + +#define for_each_vf(bp, var) \ + for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++) + +#define for_each_vfq(vf, var) \ + for ((var) = 0; (var) < vf_rxq_count(vf); (var)++) + +#define for_each_vf_sb(vf, var) \ + for ((var) = 0; (var) < vf_sb_count(vf); (var)++) + +#define is_vf_multi(vf) (vf_rxq_count(vf) > 1) + +#define HW_VF_HANDLE(bp, abs_vfid) \ + (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4)) + +#define FW_PF_MAX_HANDLE 8 + +#define FW_VF_HANDLE(abs_vfid) \ + (abs_vfid + FW_PF_MAX_HANDLE) + +/* locking and unlocking the channel mutex */ +void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs tlv); + +void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs expected_tlv); + +/* VF mailbox (aka vf-pf channel) */ + +/* a container for the bi-directional vf<-->pf messages. + * The actual response will be placed according to the offset parameter + * provided in the request. + */ + +#define MBX_MSG_ALIGN 8 +#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \ + MBX_MSG_ALIGN)) + +struct bnx2x_vf_mbx_msg { + union vfpf_tlvs req; + union pfvf_tlvs resp; +}; + +struct bnx2x_vf_mbx { + struct bnx2x_vf_mbx_msg *msg; + dma_addr_t msg_mapping; + + /* VF GPA address */ + u32 vf_addr_lo; + u32 vf_addr_hi; + + struct vfpf_first_tlv first_tlv; /* saved VF request header */ + + u8 flags; +#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent + * more than one pending msg + */ +}; + +struct bnx2x_vf_sp { + union { + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + union { + struct eth_classify_rules_ramrod_data e2; + } vlan_rdata; + + union { + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_data; +}; + +struct hw_dma { + void *addr; + dma_addr_t mapping; + size_t size; +}; + +struct bnx2x_vfdb { +#define BP_VFDB(bp) ((bp)->vfdb) + /* vf array */ + struct bnx2x_virtf *vfs; +#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)])) +#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var) + + /* queue array - for all vfs */ + struct bnx2x_vf_queue *vfqs; + + /* vf HW contexts */ + struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; +#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)]) + + /* SR-IOV information */ + struct bnx2x_sriov sriov; + struct hw_dma mbx_dma; +#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) + struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; +#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)])) + + struct hw_dma bulletin_dma; +#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) +#define BP_VF_BULLETIN(bp, vf) \ + (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \ + + (vf)) + + struct hw_dma sp_dma; +#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \ + (vf)->index * sizeof(struct bnx2x_vf_sp) + \ + offsetof(struct bnx2x_vf_sp, field)) 
+#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ + (vf)->index * sizeof(struct bnx2x_vf_sp) + \ + offsetof(struct bnx2x_vf_sp, field)) + +#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) + u32 flrd_vfs[FLRD_VFS_DWORDS]; +}; + +/* queue access */ +static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) +{ + return &(vf->vfqs[index]); +} + +static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq) +{ + return (vfq->index == 0); +} + +/* FW ids */ +static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) +{ + return vf->igu_base_id + sb_idx; +} + +static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx) +{ + return vf_igu_sb(vf, sb_idx); +} + +static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + return vf->igu_base_id + q->index; +} + +static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + return vfq_cl_id(vf, q); +} + +static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + return vfq_cl_id(vf, q); +} + +/* global iov routines */ +int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line); +int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param); +void bnx2x_iov_remove_one(struct bnx2x *bp); +void bnx2x_iov_free_mem(struct bnx2x *bp); +int bnx2x_iov_alloc_mem(struct bnx2x *bp); +int bnx2x_iov_nic_init(struct bnx2x *bp); +int bnx2x_iov_chip_cleanup(struct bnx2x *bp); +void bnx2x_iov_init_dq(struct bnx2x *bp); +void bnx2x_iov_init_dmae(struct bnx2x *bp); +void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj); +void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work); +int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); +void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); +void bnx2x_iov_storm_stats_update(struct bnx2x *bp); +void bnx2x_iov_sp_task(struct bnx2x *bp); +/* global vf mailbox routines */ +void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); +void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); + +/* CORE VF API */ +typedef u8 bnx2x_mac_addr_t[ETH_ALEN]; + +/* acquire */ +int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vf_pf_resc_request *resc); +/* init */ +int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + dma_addr_t *sb_map); + +/* VFOP generic helpers */ +#define bnx2x_vfop_default(state) do { \ + BNX2X_ERR("Bad state %d\n", (state)); \ + vfop->rc = -EINVAL; \ + goto op_err; \ + } while (0) + +enum { + VFOP_DONE, + VFOP_CONT, + VFOP_VERIFY_PEND, +}; + +#define bnx2x_vfop_finalize(vf, rc, next) do { \ + if ((rc) < 0) \ + goto op_err; \ + else if ((rc) > 0) \ + goto op_pending; \ + else if ((next) == VFOP_DONE) \ + goto op_done; \ + else if ((next) == VFOP_VERIFY_PEND) \ + BNX2X_ERR("expected pending\n"); \ + else { \ + DP(BNX2X_MSG_IOV, "no ramrod. 
scheduling\n"); \ + atomic_set(&vf->op_in_progress, 1); \ + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ + return; \ + } \ + } while (0) + +#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \ + do { \ + vfop->state = first_state; \ + vfop->op_p = &vf->op_params; \ + vfop->transition = trans_hndlr; \ + vfop->done = done_hndlr; \ + } while (0) + +static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); + WARN_ON(list_empty(&vf->op_list_head)); + return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); +} + +static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); + + WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); + if (vfop) { + INIT_LIST_HEAD(&vfop->link); + list_add(&vfop->link, &vf->op_list_head); + } + return vfop; +} + +static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vfop *vfop) +{ + /* rc < 0 - error, otherwise set to 0 */ + DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); + if (vfop->rc >= 0) + vfop->rc = 0; + DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); + + /* unlink the current op context and propagate error code + * must be done before invoking the 'done()' handler + */ + WARN(!mutex_is_locked(&vf->op_mutex), + "about to access vf op linked list but mutex was not locked!"); + list_del(&vfop->link); + + if (list_empty(&vf->op_list_head)) { + DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); + vf->op_rc = vfop->rc; + DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", + vf->op_rc, vfop->rc); + } else { + struct bnx2x_vfop *cur_vfop; + + DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc); + cur_vfop = bnx2x_vfop_cur(bp, vf); + cur_vfop->rc = vfop->rc; + DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", + vf->op_rc, vfop->rc); + } + + /* invoke done handler */ + if (vfop->done) { + DP(BNX2X_MSG_IOV, "calling done handler\n"); + vfop->done(bp, vf); + } else { + /* there is no done handler for the operation to unlock + * the mutex. Must have gotten here from PF initiated VF RELEASE + */ + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); + } + + DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", + vf->op_rc, vfop->rc); + + /* if this is the last nested op reset the wait_blocking flag + * to release any blocking wrappers, only after 'done()' is invoked + */ + if (list_empty(&vf->op_list_head)) { + DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); + vf->op_wait_blocking = false; + } + + kfree(vfop); +} + +static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + /* can take a while if any port is running */ + int cnt = 5000; + + might_sleep(); + while (cnt--) { + if (vf->op_wait_blocking == false) { +#ifdef BNX2X_STOP_ON_ERROR + DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt); +#endif + return 0; + } + usleep_range(1000, 2000); + + if (bp->panic) + return -EIO; + } + + /* timeout! 
*/ +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + + return -EBUSY; +} + +static inline int bnx2x_vfop_transition(struct bnx2x *bp, + struct bnx2x_virtf *vf, + vfop_handler_t transition, + bool block) +{ + if (block) + vf->op_wait_blocking = true; + transition(bp, vf); + if (block) + return bnx2x_vfop_wait_blocking(bp, vf); + return 0; +} + +/* VFOP queue construction helpers */ +void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx); + +void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx); + +void bnx2x_vfop_qctor_prep(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q, + struct bnx2x_vfop_qctor_params *p, + unsigned long q_type); +int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + struct bnx2x_vfop_filters *macs, + int qid, bool drv_only); + +int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid, u16 vid, bool add); + +int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + struct bnx2x_vfop_filters *vlans, + int qid, bool drv_only); + +int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid); + +int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid); + +int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + bnx2x_mac_addr_t *mcasts, + int mcast_num, bool drv_only); + +int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd, + int qid, unsigned long accept_flags); + +int bnx2x_vfop_close_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd); + +int bnx2x_vfop_release_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd); + +/* VF release ~ VF close + VF release-resources + * + * Release is the ultimate SW shutdown and is called whenever an + * irrecoverable error is encountered. 
+ */ +void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); +int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); +u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); + +/* FLR routines */ + +/* VF FLR helpers */ +int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); +void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); + +/* Handles an FLR (or VF_DISABLE) notification from the MCP */ +void bnx2x_vf_handle_flr_event(struct bnx2x *bp); + +void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, + u16 length); +void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, + u16 type, u16 length); +void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); + +bool bnx2x_tlv_supported(u16 tlvtype); + +u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, + struct pf_vf_bulletin_content *bulletin); +int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); + + +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); + +/* VF side vfpf channel functions */ +int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count); +int bnx2x_vfpf_release(struct bnx2x *bp); +int bnx2x_vfpf_release(struct bnx2x *bp); +int bnx2x_vfpf_init(struct bnx2x *bp); +void bnx2x_vfpf_close_vf(struct bnx2x *bp); +int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx); +int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); +int bnx2x_vfpf_set_mac(struct bnx2x *bp); +int bnx2x_vfpf_set_mcast(struct net_device *dev); +int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); + +static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, + size_t buf_len) +{ + strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); +} + +static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, + struct bnx2x_fastpath *fp) +{ + return PXP_VF_ADDR_USDM_QUEUES_START + + bp->acquire_resp.resc.hw_qid[fp->index] * + sizeof(struct ustorm_queue_zone_data); +} + +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); +void bnx2x_vf_map_doorbells(struct bnx2x *bp); +int bnx2x_vf_pci_alloc(struct bnx2x *bp); +void bnx2x_enable_sriov(struct bnx2x *bp); +static inline int bnx2x_vf_headroom(struct bnx2x *bp) +{ + return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; +} + +#else /* CONFIG_BNX2X_SRIOV */ + +static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj) {} +static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, + bool queue_work) {} +static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} +static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, + union event_ring_elem *elem) {return 1; } +static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} +static inline void bnx2x_vf_mbx(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) {} +static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } +static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} +static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {} +static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} +static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, + int num_vfs_param) {return 0; } +static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} +static inline void bnx2x_enable_sriov(struct bnx2x *bp) {} +static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, + u8 tx_count, u8 rx_count) 
{return 0; } +static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } +static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} +static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; } +static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } +static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; } +static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } +static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {} +static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, + size_t buf_len) {} +static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, + struct bnx2x_fastpath *fp) {return 0; } +static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) +{ + return PFVF_BULLETIN_UNCHANGED; +} + +static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } + +#endif /* CONFIG_BNX2X_SRIOV */ +#endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 89ec0667140a..4397f8b76f2e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1,6 +1,6 @@ /* bnx2x_stats.c: Broadcom Everest network driver. * - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,7 +19,7 @@ #include "bnx2x_stats.h" #include "bnx2x_cmn.h" - +#include "bnx2x_sriov.h" /* Statistics */ @@ -79,6 +79,42 @@ static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) * Init service functions */ +static void bnx2x_dp_stats(struct bnx2x *bp) +{ + int i; + + DP(BNX2X_MSG_STATS, "dumping stats:\n" + "fw_stats_req\n" + " hdr\n" + " cmd_num %d\n" + " reserved0 %d\n" + " drv_stats_counter %d\n" + " reserved1 %d\n" + " stats_counters_addrs %x %x\n", + bp->fw_stats_req->hdr.cmd_num, + bp->fw_stats_req->hdr.reserved0, + bp->fw_stats_req->hdr.drv_stats_counter, + bp->fw_stats_req->hdr.reserved1, + bp->fw_stats_req->hdr.stats_counters_addrs.hi, + bp->fw_stats_req->hdr.stats_counters_addrs.lo); + + for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) { + DP(BNX2X_MSG_STATS, + "query[%d]\n" + " kind %d\n" + " index %d\n" + " funcID %d\n" + " reserved %d\n" + " address %x %x\n", + i, bp->fw_stats_req->query[i].kind, + bp->fw_stats_req->query[i].index, + bp->fw_stats_req->query[i].funcID, + bp->fw_stats_req->query[i].reserved, + bp->fw_stats_req->query[i].address.hi, + bp->fw_stats_req->query[i].address.lo); + } +} + /* Post the next statistics ramrod. 
Protect it with the spin in * order to ensure the strict order between statistics ramrods * (each ramrod has a sequence number passed in a @@ -103,7 +139,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", bp->fw_stats_req->hdr.drv_stats_counter); - + /* adjust the ramrod to include VF queues statistics */ + bnx2x_iov_adjust_stats_req(bp); + bnx2x_dp_stats(bp); /* send FW stats ramrod */ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, @@ -174,7 +212,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp) break; } cnt--; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } return 1; } @@ -482,6 +520,12 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) static void bnx2x_stats_start(struct bnx2x *bp) { + /* vfs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(bp)) + return; + if (bp->port.pmf) bnx2x_port_stats_init(bp); @@ -501,6 +545,11 @@ static void bnx2x_stats_pmf_start(struct bnx2x *bp) static void bnx2x_stats_restart(struct bnx2x *bp) { + /* vfs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(bp)) + return; bnx2x_stats_comp(bp); bnx2x_stats_start(bp); } @@ -832,19 +881,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) return 0; } -static int bnx2x_storm_stats_update(struct bnx2x *bp) +static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) { - struct tstorm_per_port_stats *tport = - &bp->fw_stats_data->port.tstorm_port_statistics; - struct tstorm_per_pf_stats *tfunc = - &bp->fw_stats_data->pf.tstorm_pf_statistics; - struct host_func_stats *fstats = &bp->func_stats; - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; struct stats_counter *counters = &bp->fw_stats_data->storm_counters; - int i; u16 cur_stats_counter; - /* Make sure we use the value of the counter * used for sending the last stats ramrod. 
*/ @@ -880,6 +920,23 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) le16_to_cpu(counters->tstats_counter), bp->stats_counter); return -EAGAIN; } + return 0; +} + +static int bnx2x_storm_stats_update(struct bnx2x *bp) +{ + struct tstorm_per_port_stats *tport = + &bp->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &bp->fw_stats_data->pf.tstorm_pf_statistics; + struct host_func_stats *fstats = &bp->func_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; + int i; + + /* vfs stat counter is managed by pf */ + if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp)) + return -EAGAIN; estats->error_bytes_received_hi = 0; estats->error_bytes_received_lo = 0; @@ -953,8 +1010,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, - etherstatsoverrsizepkts); - UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard); + etherstatsoverrsizepkts, 32); + UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16); SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received); @@ -1033,15 +1090,15 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) estats->total_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); - ADD_64(estats->total_bytes_received_hi, - le32_to_cpu(tfunc->rcv_error_bytes.hi), - estats->total_bytes_received_lo, - le32_to_cpu(tfunc->rcv_error_bytes.lo)); + ADD_64_LE(estats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); - ADD_64(estats->error_bytes_received_hi, - le32_to_cpu(tfunc->rcv_error_bytes.hi), - estats->error_bytes_received_lo, - le32_to_cpu(tfunc->rcv_error_bytes.lo)); + ADD_64_LE(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); @@ -1174,23 +1231,34 @@ static void bnx2x_stats_update(struct bnx2x *bp) if (bnx2x_edebug_stats_stopped(bp)) return; - if (*stats_comp != DMAE_COMP_VAL) - return; + if (IS_PF(bp)) { + if (*stats_comp != DMAE_COMP_VAL) + return; - if (bp->port.pmf) - bnx2x_hw_stats_update(bp); + if (bp->port.pmf) + bnx2x_hw_stats_update(bp); - if (bnx2x_storm_stats_update(bp)) { - if (bp->stats_pending++ == 3) { - BNX2X_ERR("storm stats were not updated for 3 times\n"); - bnx2x_panic(); + if (bnx2x_storm_stats_update(bp)) { + if (bp->stats_pending++ == 3) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + } + return; } - return; + } else { + /* vf doesn't collect HW statistics, and doesn't get completions + * perform only update + */ + bnx2x_storm_stats_update(bp); } bnx2x_net_stats_update(bp); bnx2x_drv_stats_update(bp); + /* vf is done */ + if (IS_VF(bp)) + return; + if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index b4d7b26c7fe7..364e37ecbc5c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -1,6 +1,6 @@ /* bnx2x_stats.h: Broadcom Everest network driver. 
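The hunk above splits the per-storm sequence-number check out of bnx2x_storm_stats_update() into bnx2x_storm_stats_validate_counters(), which only the PF runs (a VF's stats counter is managed by the PF). A minimal userspace sketch of that validate-before-consume step follows; the struct and field names are illustrative rather than the driver's.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* each storm echoes the sequence number of the stats request it served */
struct storm_echoes {
        uint16_t xstats_counter;
        uint16_t ustats_counter;
        uint16_t tstats_counter;
};

/* accept the snapshot only if every storm answered the latest request */
static int validate_counters(const struct storm_echoes *e, uint16_t sent)
{
        if (e->xstats_counter != sent || e->ustats_counter != sent ||
            e->tstats_counter != sent)
                return -EAGAIN;         /* stale snapshot - try again later */
        return 0;
}

int main(void)
{
        struct storm_echoes e = { 7, 7, 6 };    /* tstorm still lags behind */

        printf("stale:   %d\n", validate_counters(&e, 7));   /* -EAGAIN */
        e.tstats_counter = 7;
        printf("current: %d\n", validate_counters(&e, 7));   /* 0 */
        return 0;
}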
* - * Copyright (c) 2007-2012 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -421,16 +421,19 @@ struct bnx2x_fw_port_stats_old { new->s); \ } while (0) -#define UPDATE_EXTEND_TSTAT(s, t) \ +#define UPDATE_EXTEND_TSTAT_X(s, t, size) \ do { \ - diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ + diff = le##size##_to_cpu(tclient->s) - \ + le##size##_to_cpu(old_tclient->s); \ old_tclient->s = tclient->s; \ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) -#define UPDATE_EXTEND_E_TSTAT(s, t) \ +#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32) + +#define UPDATE_EXTEND_E_TSTAT(s, t, size) \ do { \ - UPDATE_EXTEND_TSTAT(s, t); \ + UPDATE_EXTEND_TSTAT_X(s, t, size); \ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ } while (0) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c new file mode 100644 index 000000000000..36246129864c --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -0,0 +1,1651 @@ +/* bnx2x_vfpf.c: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
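The bnx2x_stats.h hunk above parameterizes UPDATE_EXTEND_TSTAT_X() by counter width and pastes the width into le##size##_to_cpu, so one macro body can extend both 16-bit and 32-bit little-endian firmware counters (the bnx2x_stats.c call sites pass 16 for no_buff_discard and 32 for pkts_too_big_discard). A self-contained sketch of the same token-pasting trick, assuming a little-endian host for the stand-in le*_to_cpu helpers:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the kernel helpers; assume a little-endian host */
#define le16_to_cpu(x) ((uint16_t)(x))
#define le32_to_cpu(x) ((uint32_t)(x))

/* "size" is pasted into the helper name, so 16-bit and 32-bit counters
 * share one macro body
 */
#define UPDATE_EXTEND_X(cur, prev, accum, size)                         \
        do {                                                            \
                uint32_t diff = le##size##_to_cpu(cur) -                \
                                le##size##_to_cpu(prev);                \
                (prev) = (cur);                                         \
                (accum) += diff;                                        \
        } while (0)

int main(void)
{
        uint16_t prev16 = 10, cur16 = 25;
        uint32_t prev32 = 1000, cur32 = 1500;
        uint64_t total = 0;

        UPDATE_EXTEND_X(cur16, prev16, total, 16);
        UPDATE_EXTEND_X(cur32, prev32, total, 32);
        printf("accumulated diff: %llu\n", (unsigned long long)total); /* 515 */
        return 0;
}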
+ * + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Shmulik Ravid <shmulikr@broadcom.com> + * Ariel Elior <ariele@broadcom.com> + */ + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include <linux/crc32.h> + +/* place a given tlv on the tlv buffer at a given offset */ +void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, + u16 length) +{ + struct channel_tlv *tl = + (struct channel_tlv *)(tlvs_list + offset); + + tl->type = type; + tl->length = length; +} + +/* Clear the mailbox and init the header of the first tlv */ +void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, + u16 type, u16 length) +{ + DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n", + type); + + /* Clear mailbox */ + memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg)); + + /* init type and length */ + bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length); + + /* init first tlv header */ + first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); +} + +/* list the types and lengths of the tlvs on the buffer */ +void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) +{ + int i = 1; + struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + + while (tlv->type != CHANNEL_TLV_LIST_END) { + /* output tlv */ + DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i, + tlv->type, tlv->length); + + /* advance to next tlv */ + tlvs_list += tlv->length; + + /* cast general tlv list pointer to channel tlv header*/ + tlv = (struct channel_tlv *)tlvs_list; + + i++; + + /* break condition for this loop */ + if (i > MAX_TLVS_IN_LIST) { + WARN(true, "corrupt tlvs"); + return; + } + } + + /* output last tlv */ + DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i, + tlv->type, tlv->length); +} + +/* test whether we support a tlv type */ +bool bnx2x_tlv_supported(u16 tlvtype) +{ + return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; +} + +static inline int bnx2x_pfvf_status_codes(int rc) +{ + switch (rc) { + case 0: + return PFVF_STATUS_SUCCESS; + case -ENOMEM: + return PFVF_STATUS_NO_RESOURCE; + default: + return PFVF_STATUS_FAILURE; + } +} + +int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) +{ + struct cstorm_vf_zone_data __iomem *zone_data = + REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); + int tout = 600, interval = 100; /* wait for 60 seconds */ + + if (*done) { + BNX2X_ERR("done was non zero before message to pf was sent\n"); + WARN_ON(true); + return -EINVAL; + } + + /* Write message address */ + writel(U64_LO(msg_mapping), + &zone_data->non_trigger.vf_pf_channel.msg_addr_lo); + writel(U64_HI(msg_mapping), + &zone_data->non_trigger.vf_pf_channel.msg_addr_hi); + + /* make sure the address is written before FW accesses it */ + wmb(); + + /* Trigger the PF FW */ + writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid); + + /* Wait for PF to complete */ + while ((tout >= 0) && (!*done)) { + msleep(interval); + tout -= 1; + + /* progress indicator - HV can take its own sweet time in + * answering VFs... 
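bnx2x_add_tlv() and bnx2x_dp_tlv_list() above treat the mailbox as a flat buffer of {type, length} headers: writing a TLV just drops a header at a given offset, and walking the list means advancing by each TLV's length until a list-end type is found, with a cap on the number of entries as a corruption guard. The sketch below reproduces that walk over an ordinary byte buffer; the type constants and MAX_TLVS cap are illustrative, not the driver's values.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct channel_tlv {            /* mirrors the on-the-wire header */
        uint16_t type;
        uint16_t length;        /* includes this header */
};

enum { TLV_LIST_END = 0, TLV_ACQUIRE = 1, MAX_TLVS = 16 };

/* place a {type, length} header at the given offset in the buffer */
static void add_tlv(void *buf, size_t offset, uint16_t type, uint16_t length)
{
        struct channel_tlv tl;

        tl.type = type;
        tl.length = length;
        memcpy((uint8_t *)buf + offset, &tl, sizeof(tl));
}

/* walk the list: advance by each TLV's length until LIST_END or the cap */
static void dump_tlv_list(const void *buf)
{
        const uint8_t *p = buf;
        struct channel_tlv tl;
        int i;

        for (i = 1; i <= MAX_TLVS; i++) {
                memcpy(&tl, p, sizeof(tl));
                printf("TLV %d: type %u, length %u\n", i, tl.type, tl.length);
                if (tl.type == TLV_LIST_END)
                        return;
                p += tl.length;
        }
        fprintf(stderr, "corrupt tlv list (no LIST_END within %d entries)\n",
                MAX_TLVS);
}

int main(void)
{
        uint8_t mbox[64] = { 0 };

        add_tlv(mbox, 0, TLV_ACQUIRE, 24);      /* 24-byte request body */
        add_tlv(mbox, 24, TLV_LIST_END, sizeof(struct channel_tlv));
        dump_tlv_list(mbox);
        return 0;
}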
+ */ + DP_CONT(BNX2X_MSG_IOV, "."); + } + + if (!*done) { + BNX2X_ERR("PF response has timed out\n"); + return -EAGAIN; + } + DP(BNX2X_MSG_SP, "Got a response from PF\n"); + return 0; +} + +int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) +{ + u32 me_reg; + int tout = 10, interval = 100; /* Wait for 1 sec */ + + do { + /* pxp traps vf read of doorbells and returns me reg value */ + me_reg = readl(bp->doorbells); + if (GOOD_ME_REG(me_reg)) + break; + + msleep(interval); + + BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?", + me_reg); + } while (tout-- > 0); + + if (!GOOD_ME_REG(me_reg)) { + BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg); + return -EINVAL; + } + + BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); + + *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; + + return 0; +} + +int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) +{ + int rc = 0, attempts = 0; + struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; + struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; + u32 vf_id; + bool resources_acquired = false; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + + if (bnx2x_get_vf_id(bp, &vf_id)) + return -EAGAIN; + + req->vfdev_info.vf_id = vf_id; + req->vfdev_info.vf_os = 0; + + req->resc_request.num_rxqs = rx_count; + req->resc_request.num_txqs = tx_count; + req->resc_request.num_sbs = bp->igu_sb_cnt; + req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; + req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS; + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = bp->pf2vf_bulletin_mapping; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + while (!resources_acquired) { + DP(BNX2X_MSG_SP, "attempting to acquire resources\n"); + + /* send acquire request */ + rc = bnx2x_send_msg2pf(bp, + &resp->hdr.status, + bp->vf2pf_mbox_mapping); + + /* PF timeout */ + if (rc) + return rc; + + /* copy acquire response from buffer to bp */ + memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); + + attempts++; + + /* test whether the PF accepted our request. If not, humble the + * the request and try again. + */ + if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { + DP(BNX2X_MSG_SP, "resources acquired\n"); + resources_acquired = true; + } else if (bp->acquire_resp.hdr.status == + PFVF_STATUS_NO_RESOURCE && + attempts < VF_ACQUIRE_THRESH) { + DP(BNX2X_MSG_SP, + "PF unwilling to fulfill resource request. Try PF recommended amount\n"); + + /* humble our request */ + req->resc_request.num_txqs = + bp->acquire_resp.resc.num_txqs; + req->resc_request.num_rxqs = + bp->acquire_resp.resc.num_rxqs; + req->resc_request.num_sbs = + bp->acquire_resp.resc.num_sbs; + req->resc_request.num_mac_filters = + bp->acquire_resp.resc.num_mac_filters; + req->resc_request.num_vlan_filters = + bp->acquire_resp.resc.num_vlan_filters; + req->resc_request.num_mc_filters = + bp->acquire_resp.resc.num_mc_filters; + + /* Clear response buffer */ + memset(&bp->vf2pf_mbox->resp, 0, + sizeof(union pfvf_tlvs)); + } else { + /* PF reports error */ + BNX2X_ERR("Failed to get the requested amount of resources: %d. 
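bnx2x_vfpf_acquire() negotiates resources with the PF: the VF asks for what it would like, and if the PF answers NO_RESOURCE it also reports the amounts it is prepared to grant, which the VF adopts ("humbles its request") before retrying up to VF_ACQUIRE_THRESH attempts. A compact stand-alone model of that handshake, with an invented pretend-PF and illustrative limits:

#include <stdio.h>

enum status { ST_SUCCESS, ST_NO_RESOURCE, ST_FAILURE };

struct resc { int rxqs, txqs, sbs; };

/* pretend PF: grants at most two of each resource kind */
static enum status pf_acquire(const struct resc *req, struct resc *granted)
{
        granted->rxqs = req->rxqs > 2 ? 2 : req->rxqs;
        granted->txqs = req->txqs > 2 ? 2 : req->txqs;
        granted->sbs  = req->sbs  > 2 ? 2 : req->sbs;
        return (granted->rxqs == req->rxqs && granted->txqs == req->txqs &&
                granted->sbs == req->sbs) ? ST_SUCCESS : ST_NO_RESOURCE;
}

#define ACQUIRE_THRESH 5        /* illustrative retry bound */

static int vf_acquire(struct resc *req)
{
        struct resc resp;
        int attempts = 0;

        while (attempts++ < ACQUIRE_THRESH) {
                enum status st = pf_acquire(req, &resp);

                if (st == ST_SUCCESS) {
                        printf("acquired %d rxqs after %d attempt(s)\n",
                               resp.rxqs, attempts);
                        return 0;
                }
                if (st != ST_NO_RESOURCE)
                        return -1;
                /* humble the request to the PF-recommended amounts, retry */
                *req = resp;
        }
        return -1;
}

int main(void)
{
        struct resc req = { .rxqs = 8, .txqs = 8, .sbs = 8 };

        return vf_acquire(&req) ? 1 : 0;
}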
Breaking...\n", + bp->acquire_resp.hdr.status); + return -EAGAIN; + } + } + + /* get HW info */ + bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); + bp->link_params.chip_id = bp->common.chip_id; + bp->db_size = bp->acquire_resp.pfdev_info.db_size; + bp->common.int_block = INT_BLOCK_IGU; + bp->common.chip_port_mode = CHIP_2_PORT_MODE; + bp->igu_dsb_id = -1; + bp->mf_ov = 0; + bp->mf_mode = 0; + bp->common.flash_size = 0; + bp->flags |= + NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; + bp->igu_sb_cnt = 1; + bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; + strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, + sizeof(bp->fw_ver)); + + if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) + memcpy(bp->dev->dev_addr, + bp->acquire_resp.resc.current_mac_addr, + ETH_ALEN); + + return 0; +} + +int bnx2x_vfpf_release(struct bnx2x *bp) +{ + struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + u32 rc = 0, vf_id; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); + + if (bnx2x_get_vf_id(bp, &vf_id)) + return -EAGAIN; + + req->vf_id = vf_id; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send release request */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) + /* PF timeout */ + return rc; + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + /* PF released us */ + DP(BNX2X_MSG_SP, "vf released\n"); + } else { + /* PF reports error */ + BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", + resp->hdr.status); + return -EAGAIN; + } + + return 0; +} + +/* Tell PF about SB addresses */ +int bnx2x_vfpf_init(struct bnx2x *bp) +{ + struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc, i; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req)); + + /* status blocks */ + for_each_eth_queue(bp, i) + req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i, + status_blk_mapping); + + /* statistics - requests only supports single queue for now */ + req->stats_addr = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("INIT VF failed: %d. 
Breaking...\n", + resp->hdr.status); + return -EAGAIN; + } + + DP(BNX2X_MSG_SP, "INIT VF Succeeded\n"); + return 0; +} + +/* CLOSE VF - opposite to INIT_VF */ +void bnx2x_vfpf_close_vf(struct bnx2x *bp) +{ + struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int i, rc; + u32 vf_id; + + /* If we haven't got a valid VF id, there is no sense to + * continue with sending messages + */ + if (bnx2x_get_vf_id(bp, &vf_id)) + goto free_irq; + + /* Close the queues */ + for_each_queue(bp, i) + bnx2x_vfpf_teardown_queue(bp, i); + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); + + req->vf_id = vf_id; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) + BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc); + + else if (resp->hdr.status != PFVF_STATUS_SUCCESS) + BNX2X_ERR("Sending CLOSE failed: pf response was %d\n", + resp->hdr.status); + +free_irq: + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + + /* Release IRQs */ + bnx2x_free_irq(bp); +} + +/* ask the pf to open a queue for the vf */ +int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) +{ + struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u16 tpa_agg_size = 0, flags = 0; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); + + /* select tpa mode to request */ + if (!fp->disable_tpa) { + flags |= VFPF_QUEUE_FLG_TPA; + flags |= VFPF_QUEUE_FLG_TPA_IPV6; + if (fp->mode == TPA_MODE_GRO) + flags |= VFPF_QUEUE_FLG_TPA_GRO; + tpa_agg_size = TPA_AGG_SIZE; + } + + /* calculate queue flags */ + flags |= VFPF_QUEUE_FLG_STATS; + flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; + flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0; + flags |= VFPF_QUEUE_FLG_VLAN; + DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); + + /* Common */ + req->vf_qid = fp_idx; + req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID; + + /* Rx */ + req->rxq.rcq_addr = fp->rx_comp_mapping; + req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE; + req->rxq.rxq_addr = fp->rx_desc_mapping; + req->rxq.sge_addr = fp->rx_sge_mapping; + req->rxq.vf_sb = fp_idx; + req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS; + req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0; + req->rxq.mtu = bp->dev->mtu; + req->rxq.buf_sz = fp->rx_buf_size; + req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE; + req->rxq.tpa_agg_sz = tpa_agg_size; + req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; + req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) & + (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; + req->rxq.flags = flags; + req->rxq.drop_flags = 0; + req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT; + req->rxq.stat_id = -1; /* No stats at the moment */ + + /* Tx */ + req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping; + req->txq.vf_sb = fp_idx; + req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; + req->txq.hc_rate = bp->tx_ticks ? 
1000000/bp->tx_ticks : 0; + req->txq.flags = flags; + req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n", + fp_idx); + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n", + fp_idx, resp->hdr.status); + return -EINVAL; + } + return rc; +} + +int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) +{ + struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q, + sizeof(*req)); + + req->vf_qid = qidx; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) { + BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx, + rc); + return rc; + } + + /* PF failed the transaction */ + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx, + resp->hdr.status); + return -EINVAL; + } + + return 0; +} + +/* request pf to add a mac for the vf */ +int bnx2x_vfpf_set_mac(struct bnx2x *bp) +{ + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED; + req->vf_qid = 0; + req->n_mac_vlan_filters = 1; + req->filters[0].flags = + VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC; + + /* sample bulletin board for new mac */ + bnx2x_sample_bulletin(bp); + + /* copy mac from device to request */ + memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + return rc; + } + + /* failure may mean PF was configured with a new mac for us */ + while (resp->hdr.status == PFVF_STATUS_FAILURE) { + DP(BNX2X_MSG_IOV, + "vfpf SET MAC failed. 
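When a SET_MAC request fails, bnx2x_vfpf_set_mac() re-samples the bulletin board: the PF may have meanwhile forced a different MAC, and only if the bulletin reports an update does the VF copy the new address and resend; otherwise it gives up. The sketch below models that retry loop with invented stand-ins for the PF and for the bulletin sample, not the driver's code.

#include <stdio.h>
#include <string.h>

enum bulletin { BULLETIN_UNCHANGED, BULLETIN_UPDATED };
enum status   { ST_SUCCESS, ST_FAILURE };

static unsigned char pf_forced_mac[6] = { 0x02, 0, 0, 0, 0, 0x42 };

/* pretend PF: only accepts the MAC it forced via the bulletin */
static enum status pf_set_mac(const unsigned char *mac)
{
        return memcmp(mac, pf_forced_mac, 6) ? ST_FAILURE : ST_SUCCESS;
}

/* pretend bulletin sample: reports the forced MAC once */
static enum bulletin sample_bulletin(unsigned char *dev_mac)
{
        if (!memcmp(dev_mac, pf_forced_mac, 6))
                return BULLETIN_UNCHANGED;
        memcpy(dev_mac, pf_forced_mac, 6);
        return BULLETIN_UPDATED;
}

static int vf_set_mac(unsigned char *dev_mac)
{
        enum status st = pf_set_mac(dev_mac);

        while (st == ST_FAILURE) {
                if (sample_bulletin(dev_mac) != BULLETIN_UPDATED)
                        break;                  /* nothing new - give up */
                st = pf_set_mac(dev_mac);       /* resend with the new MAC */
        }
        return st == ST_SUCCESS ? 0 : -1;
}

int main(void)
{
        unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };

        printf("set_mac: %d, final MAC ends in %02x\n",
               vf_set_mac(mac), mac[5]);
        return 0;
}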
Check bulletin board for new posts\n"); + + /* check if bulletin board was updated */ + if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { + /* copy mac from device to request */ + memcpy(req->filters[0].mac, bp->dev->dev_addr, + ETH_ALEN); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, + bp->vf2pf_mbox_mapping); + } else { + /* no new info in bulletin */ + break; + } + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status); + return -EINVAL; + } + + return 0; +} + +int bnx2x_vfpf_set_mcast(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc, i = 0; + struct netdev_hw_addr *ha; + + if (bp->state != BNX2X_STATE_OPEN) { + DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); + return -EINVAL; + } + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + /* Get Rx mode requested */ + DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); + + netdev_for_each_mc_addr(ha, dev) { + DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", + bnx2x_mc_addr(ha)); + memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN); + i++; + } + + /* We support four PFVF_MAX_MULTICAST_PER_VF mcast + * addresses tops + */ + if (i >= PFVF_MAX_MULTICAST_PER_VF) { + DP(NETIF_MSG_IFUP, + "VF supports not more than %d multicast MAC addresses\n", + PFVF_MAX_MULTICAST_PER_VF); + return -EINVAL; + } + + req->n_multicast = i; + req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED; + req->vf_qid = 0; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("Sending a message failed: %d\n", rc); + return rc; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Set Rx mode/multicast failed: %d\n", + resp->hdr.status); + return -EINVAL; + } + + return 0; +} + +int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) +{ + int mode = bp->rx_mode; + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode); + + switch (mode) { + case BNX2X_RX_MODE_NONE: /* no Rx */ + req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; + break; + case BNX2X_RX_MODE_NORMAL: + req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + break; + case BNX2X_RX_MODE_ALLMULTI: + req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + break; + case BNX2X_RX_MODE_PROMISC: + req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + break; + default: + BNX2X_ERR("BAD rx mode (%d)\n", mode); + return -EINVAL; + } + + req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; + req->vf_qid = 0; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, 
req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + BNX2X_ERR("Sending a message failed: %d\n", rc); + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); + return -EINVAL; + } + + return rc; +} + +/* General service functions */ +static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) +{ + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid); + + REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY); +} + +static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) +{ + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid); + + REG_WR8(bp, addr, 1); +} + +static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) +{ + int i; + + for_each_vf(bp, i) + storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); +} + +/* enable vf_pf mailbox (aka vf-pf-chanell) */ +void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) +{ + bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); + + /* enable the mailbox in the FW */ + storm_memset_vf_mbx_ack(bp, abs_vfid); + storm_memset_vf_mbx_valid(bp, abs_vfid); + + /* enable the VF access to the mailbox */ + bnx2x_vf_enable_access(bp, abs_vfid); +} + +/* this works only on !E1h */ +static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, + dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi, + u32 vf_addr_lo, u32 len32) +{ + struct dmae_command dmae; + + if (CHIP_IS_E1x(bp)) { + BNX2X_ERR("Chip revision does not support VFs\n"); + return DMAE_NOT_RDY; + } + + if (!bp->dmae_ready) { + BNX2X_ERR("DMAE is not ready, can not copy\n"); + return DMAE_NOT_RDY; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI); + + if (from_vf) { + dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) | + (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) | + (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT); + + dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT); + + dmae.src_addr_lo = vf_addr_lo; + dmae.src_addr_hi = vf_addr_hi; + dmae.dst_addr_lo = U64_LO(pf_addr); + dmae.dst_addr_hi = U64_HI(pf_addr); + } else { + dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) | + (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) | + (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT); + + dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT); + + dmae.src_addr_lo = U64_LO(pf_addr); + dmae.src_addr_hi = U64_HI(pf_addr); + dmae.dst_addr_lo = vf_addr_lo; + dmae.dst_addr_hi = vf_addr_hi; + } + dmae.len = len32; + bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE); + + /* issue the command and wait for completion */ + return bnx2x_issue_dmae_with_comp(bp, &dmae); +} + +static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); + u64 vf_addr; + dma_addr_t pf_addr; + u16 length, type; + int rc; + struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; + + /* prepare response */ + type = mbx->first_tlv.tl.type; + length = type == CHANNEL_TLV_ACQUIRE ? 
+ sizeof(struct pfvf_acquire_resp_tlv) : + sizeof(struct pfvf_general_resp_tlv); + bnx2x_add_tlv(bp, resp, 0, type, length); + resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); + bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + bnx2x_dp_tlv_list(bp, resp); + DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", + mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + + /* send response */ + vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + + mbx->first_tlv.resp_msg_offset; + pf_addr = mbx->msg_mapping + + offsetof(struct bnx2x_vf_mbx_msg, resp); + + /* copy the response body, if there is one, before the header, as the vf + * is sensitive to the header being written + */ + if (resp->hdr.tl.length > sizeof(u64)) { + length = resp->hdr.tl.length - sizeof(u64); + vf_addr += sizeof(u64); + pf_addr += sizeof(u64); + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + length/4); + if (rc) { + BNX2X_ERR("Failed to copy response body to VF %d\n", + vf->abs_vfid); + goto mbx_error; + } + vf_addr -= sizeof(u64); + pf_addr -= sizeof(u64); + } + + /* ack the FW */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + mmiowb(); + + /* initiate dmae to send the response */ + mbx->flags &= ~VF_MSG_INPROCESS; + + /* copy the response header including status-done field, + * must be last dmae, must be after FW is acked + */ + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + sizeof(u64)/4); + + /* unlock channel mutex */ + bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + + if (rc) { + BNX2X_ERR("Failed to copy response status to VF %d\n", + vf->abs_vfid); + goto mbx_error; + } + return; + +mbx_error: + bnx2x_vf_release(bp, vf, false); /* non blocking */ +} + +static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx, int vfop_status) +{ + int i; + struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp; + struct pf_vf_resc *resc = &resp->resc; + u8 status = bnx2x_pfvf_status_codes(vfop_status); + + memset(resp, 0, sizeof(*resp)); + + /* fill in pfdev info */ + resp->pfdev_info.chip_num = bp->common.chip_id; + resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT); + resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; + resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | + /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); + bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, + sizeof(resp->pfdev_info.fw_ver)); + + if (status == PFVF_STATUS_NO_RESOURCE || + status == PFVF_STATUS_SUCCESS) { + /* set resources numbers, if status equals NO_RESOURCE these + * are max possible numbers + */ + resc->num_rxqs = vf_rxq_count(vf) ? : + bnx2x_vf_max_queue_cnt(bp, vf); + resc->num_txqs = vf_txq_count(vf) ? 
: + bnx2x_vf_max_queue_cnt(bp, vf); + resc->num_sbs = vf_sb_count(vf); + resc->num_mac_filters = vf_mac_rules_cnt(vf); + resc->num_vlan_filters = vf_vlan_rules_cnt(vf); + resc->num_mc_filters = 0; + + if (status == PFVF_STATUS_SUCCESS) { + /* fill in the allocated resources */ + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); + + for_each_vfq(vf, i) + resc->hw_qid[i] = + vfq_qzone_id(vf, vfq_get(vf, i)); + + for_each_vf_sb(vf, i) { + resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i); + resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i); + } + + /* if a mac has been set for this vf, supply it */ + if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { + memcpy(resc->current_mac_addr, bulletin->mac, + ETH_ALEN); + } + } + } + + DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n" + "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n", + vf->abs_vfid, + resp->pfdev_info.chip_num, + resp->pfdev_info.db_size, + resp->pfdev_info.indices_per_sb, + resp->pfdev_info.pf_cap, + resc->num_rxqs, + resc->num_txqs, + resc->num_sbs, + resc->num_mac_filters, + resc->num_vlan_filters, + resc->num_mc_filters, + resp->pfdev_info.fw_ver); + + DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ "); + for (i = 0; i < vf_rxq_count(vf); i++) + DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]); + DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ "); + for (i = 0; i < vf_sb_count(vf); i++) + DP_CONT(BNX2X_MSG_IOV, "%d:%d ", + resc->hw_sbs[i].hw_sb_id, + resc->hw_sbs[i].sb_qid); + DP_CONT(BNX2X_MSG_IOV, "]\n"); + + /* send the response */ + vf->op_rc = vfop_status; + bnx2x_vf_mbx_resp(bp, vf); +} + +static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int rc; + struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire; + + /* log vfdef info */ + DP(BNX2X_MSG_IOV, + "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n", + vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os, + acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs, + acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters, + acquire->resc_request.num_vlan_filters, + acquire->resc_request.num_mc_filters); + + /* acquire the resources */ + rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); + + /* store address of vf's bulletin board */ + vf->bulletin_map = acquire->bulletin_addr; + + /* response */ + bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); +} + +static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_init_tlv *init = &mbx->msg->req.init; + + /* record ghost addresses from vf message */ + vf->spq_map = init->spq_addr; + vf->fw_stat_map = init->stats_addr; + vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); + + /* response */ + bnx2x_vf_mbx_resp(bp, vf); +} + +/* convert MBX queue-flags to standard SP queue-flags */ +static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags, + unsigned long *sp_q_flags) +{ + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA) + __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6) + __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO) + __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_STATS) + __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_OV) + __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); + if 
(mbx_q_flags & VFPF_QUEUE_FLG_VLAN) + __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_COS) + __set_bit(BNX2X_Q_FLG_COS, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_HC) + __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) + __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); +} + +static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + + /* verify vf_qid */ + if (setup_q->vf_qid >= vf_rxq_count(vf)) { + BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", + setup_q->vf_qid, vf_rxq_count(vf)); + vf->op_rc = -EINVAL; + goto response; + } + + /* tx queues must be setup alongside rx queues thus if the rx queue + * is not marked as valid there's nothing to do. + */ + if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) { + struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid); + unsigned long q_type = 0; + + struct bnx2x_queue_init_params *init_p; + struct bnx2x_queue_setup_params *setup_p; + + /* reinit the VF operation context */ + memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); + setup_p = &vf->op_params.qctor.prep_qsetup; + init_p = &vf->op_params.qctor.qstate.params.init; + + /* activate immediately */ + __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); + + if (setup_q->param_valid & VFPF_TXQ_VALID) { + struct bnx2x_txq_setup_params *txq_params = + &setup_p->txq_params; + + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* save sb resource index */ + q->sb_idx = setup_q->txq.vf_sb; + + /* tx init */ + init_p->tx.hc_rate = setup_q->txq.hc_rate; + init_p->tx.sb_cq_index = setup_q->txq.sb_index; + + bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, + &init_p->tx.flags); + + /* tx setup - flags */ + bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, + &setup_p->flags); + + /* tx setup - general, nothing */ + + /* tx setup - tx */ + txq_params->dscr_map = setup_q->txq.txq_addr; + txq_params->sb_cq_index = setup_q->txq.sb_index; + txq_params->traffic_type = setup_q->txq.traffic_type; + + bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p, + q->index, q->sb_idx); + } + + if (setup_q->param_valid & VFPF_RXQ_VALID) { + struct bnx2x_rxq_setup_params *rxq_params = + &setup_p->rxq_params; + + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + + /* Note: there is no support for different SBs + * for TX and RX + */ + q->sb_idx = setup_q->rxq.vf_sb; + + /* rx init */ + init_p->rx.hc_rate = setup_q->rxq.hc_rate; + init_p->rx.sb_cq_index = setup_q->rxq.sb_index; + bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, + &init_p->rx.flags); + + /* rx setup - flags */ + bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, + &setup_p->flags); + + /* rx setup - general */ + setup_p->gen_params.mtu = setup_q->rxq.mtu; + + /* rx setup - rx */ + rxq_params->drop_flags = setup_q->rxq.drop_flags; + rxq_params->dscr_map = setup_q->rxq.rxq_addr; + rxq_params->sge_map = setup_q->rxq.sge_addr; + rxq_params->rcq_map = setup_q->rxq.rcq_addr; + rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr; + rxq_params->buf_sz = setup_q->rxq.buf_sz; + rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz; + rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt; + rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz; + rxq_params->cache_line_log = + setup_q->rxq.cache_line_log; + rxq_params->sb_cq_index = setup_q->rxq.sb_index; + + bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p, + q->index, q->sb_idx); + } + /* complete 
the preparations */ + bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type); + + vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index); + if (vf->op_rc) + goto response; + return; + } +response: + bnx2x_vf_mbx_resp(bp, vf); +} + +enum bnx2x_vfop_filters_state { + BNX2X_VFOP_MBX_Q_FILTERS_MACS, + BNX2X_VFOP_MBX_Q_FILTERS_VLANS, + BNX2X_VFOP_MBX_Q_FILTERS_RXMODE, + BNX2X_VFOP_MBX_Q_FILTERS_MCAST, + BNX2X_VFOP_MBX_Q_FILTERS_DONE +}; + +static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *tlv, + struct bnx2x_vfop_filters **pfl, + u32 type_flag) +{ + int i, j; + struct bnx2x_vfop_filters *fl = NULL; + size_t fsz; + + fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) + + sizeof(struct bnx2x_vfop_filters); + + fl = kzalloc(fsz, GFP_KERNEL); + if (!fl) + return -ENOMEM; + + INIT_LIST_HEAD(&fl->head); + + for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { + struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; + + if ((msg_filter->flags & type_flag) != type_flag) + continue; + if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { + fl->filters[j].mac = msg_filter->mac; + fl->filters[j].type = BNX2X_VFOP_FILTER_MAC; + } else { + fl->filters[j].vid = msg_filter->vlan_tag; + fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN; + } + fl->filters[j].add = + (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? + true : false; + list_add_tail(&fl->filters[j++].link, &fl->head); + } + if (list_empty(&fl->head)) + kfree(fl); + else + *pfl = fl; + + return 0; +} + +static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, + struct vfpf_q_mac_vlan_filter *filter) +{ + DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags); + if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID) + DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag); + if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID) + DP_CONT(msglvl, ", MAC=%pM", filter->mac); + DP_CONT(msglvl, "\n"); +} + +static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, + struct vfpf_set_q_filters_tlv *filters) +{ + int i; + + if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) + for (i = 0; i < filters->n_mac_vlan_filters; i++) + bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i, + &filters->filters[i]); + + if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) + DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask); + + if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) + for (i = 0; i < filters->n_multicast; i++) + DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]); +} + +#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID +#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID + +static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc; + + struct vfpf_set_q_filters_tlv *msg = + &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; + + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + enum bnx2x_vfop_filters_state state = vfop->state; + + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vfop_mbx_qfilters, + .block = false, + }; + + DP(BNX2X_MSG_IOV, "STATE: %d\n", state); + + if (vfop->rc < 0) + goto op_err; + + switch (state) { + case BNX2X_VFOP_MBX_Q_FILTERS_MACS: + /* next state */ + vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS; + + /* check for any vlan/mac changes */ + if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { + /* build mac list */ + struct bnx2x_vfop_filters *fl = NULL; + + vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_MAC_FILTER); + if (vfop->rc) + goto op_err; + + if (fl) { + /* set mac 
list */ + rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl, + msg->vf_qid, + false); + if (rc) { + vfop->rc = rc; + goto op_err; + } + return; + } + } + /* fall through */ + + case BNX2X_VFOP_MBX_Q_FILTERS_VLANS: + /* next state */ + vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE; + + /* check for any vlan/mac changes */ + if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { + /* build vlan list */ + struct bnx2x_vfop_filters *fl = NULL; + + vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_VLAN_FILTER); + if (vfop->rc) + goto op_err; + + if (fl) { + /* set vlan list */ + rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl, + msg->vf_qid, + false); + if (rc) { + vfop->rc = rc; + goto op_err; + } + return; + } + } + /* fall through */ + + case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE: + /* next state */ + vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST; + + if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { + unsigned long accept = 0; + + /* covert VF-PF if mask to bnx2x accept flags */ + if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) + __set_bit(BNX2X_ACCEPT_UNICAST, &accept); + + if (msg->rx_mask & + VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST) + __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); + + if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST) + __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept); + + if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST) + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept); + + if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST) + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); + + /* A packet arriving the vf's mac should be accepted + * with any vlan + */ + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + + /* set rx-mode */ + rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, + msg->vf_qid, accept); + if (rc) { + vfop->rc = rc; + goto op_err; + } + return; + } + /* fall through */ + + case BNX2X_VFOP_MBX_Q_FILTERS_MCAST: + /* next state */ + vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE; + + if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { + /* set mcasts */ + rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast, + msg->n_multicast, false); + if (rc) { + vfop->rc = rc; + goto op_err; + } + return; + } + /* fall through */ +op_done: + case BNX2X_VFOP_MBX_Q_FILTERS_DONE: + bnx2x_vfop_end(bp, vf, vfop); + return; +op_err: + BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", + vf->abs_vfid, msg->vf_qid, vfop->rc); + goto op_done; + + default: + bnx2x_vfop_default(state); + } +} + +static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + if (vfop) { + bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS, + bnx2x_vfop_mbx_qfilters, cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters, + cmd->block); + } + return -ENOMEM; +} + +static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + + /* if a mac was already set for this VF via the set vf mac ndo, we only + * accept mac configurations of that mac. Why accept them at all? + * because PF may have been unable to configure the mac at the time + * since queue was not set up. + */ + if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { + /* once a mac was set by ndo can only accept a single mac... 
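In the RXMODE state above, the PF converts the VF's VFPF_RX_MASK_* request bits into its internal BNX2X_ACCEPT_* bitmap and additionally sets BNX2X_ACCEPT_ANY_VLAN, since a packet arriving at the VF's MAC should be accepted with any vlan tag. The translation is sketched below with illustrative bit values rather than the driver's definitions.

#include <stdio.h>

#define RX_MASK_ACCEPT_MATCHED_UNICAST   0x01
#define RX_MASK_ACCEPT_MATCHED_MULTICAST 0x02
#define RX_MASK_ACCEPT_ALL_UNICAST       0x04
#define RX_MASK_ACCEPT_ALL_MULTICAST     0x08
#define RX_MASK_ACCEPT_BROADCAST         0x10

enum accept_bit {
        ACCEPT_UNICAST,
        ACCEPT_MULTICAST,
        ACCEPT_ALL_UNICAST,
        ACCEPT_ALL_MULTICAST,
        ACCEPT_BROADCAST,
        ACCEPT_ANY_VLAN,
};

static unsigned long rx_mask_to_accept(unsigned int rx_mask)
{
        unsigned long accept = 0;

        if (rx_mask & RX_MASK_ACCEPT_MATCHED_UNICAST)
                accept |= 1UL << ACCEPT_UNICAST;
        if (rx_mask & RX_MASK_ACCEPT_MATCHED_MULTICAST)
                accept |= 1UL << ACCEPT_MULTICAST;
        if (rx_mask & RX_MASK_ACCEPT_ALL_UNICAST)
                accept |= 1UL << ACCEPT_ALL_UNICAST;
        if (rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST)
                accept |= 1UL << ACCEPT_ALL_MULTICAST;
        if (rx_mask & RX_MASK_ACCEPT_BROADCAST)
                accept |= 1UL << ACCEPT_BROADCAST;

        /* frames to the VF's own MAC are accepted with any vlan tag */
        accept |= 1UL << ACCEPT_ANY_VLAN;
        return accept;
}

int main(void)
{
        /* "normal" rx mode: matched uni/multicast plus broadcast */
        unsigned int mask = RX_MASK_ACCEPT_MATCHED_UNICAST |
                            RX_MASK_ACCEPT_MATCHED_MULTICAST |
                            RX_MASK_ACCEPT_BROADCAST;

        printf("accept bitmap: 0x%lx\n", rx_mask_to_accept(mask));
        return 0;
}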
*/ + if (filters->n_mac_vlan_filters > 1) { + BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", + vf->abs_vfid); + vf->op_rc = -EPERM; + goto response; + } + + /* ...and only the mac set by the ndo */ + if (filters->n_mac_vlan_filters == 1 && + memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) { + BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", + vf->abs_vfid); + + vf->op_rc = -EPERM; + goto response; + } + } + + /* verify vf_qid */ + if (filters->vf_qid > vf_rxq_count(vf)) + goto response; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", + vf->abs_vfid, + filters->vf_qid); + + /* print q_filter message */ + bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); + + vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd); + if (vf->op_rc) + goto response; + return; + +response: + bnx2x_vf_mbx_resp(bp, vf); +} + +static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int qid = mbx->msg->req.q_op.vf_qid; + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", + vf->abs_vfid, qid); + + vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid); + if (vf->op_rc) + bnx2x_vf_mbx_resp(bp, vf); +} + +static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + + DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); + + vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); + if (vf->op_rc) + bnx2x_vf_mbx_resp(bp, vf); +} + +static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + + DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); + + vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); + if (vf->op_rc) + bnx2x_vf_mbx_resp(bp, vf); +} + +/* dispatch request */ +static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int i; + + /* check if tlv type is known */ + if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { + /* Lock the per vf op mutex and note the locker's identity. + * The unlock will take place in mbx response. + */ + bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + + /* switch on the opcode */ + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + bnx2x_vf_mbx_acquire(bp, vf, mbx); + break; + case CHANNEL_TLV_INIT: + bnx2x_vf_mbx_init_vf(bp, vf, mbx); + break; + case CHANNEL_TLV_SETUP_Q: + bnx2x_vf_mbx_setup_q(bp, vf, mbx); + break; + case CHANNEL_TLV_SET_Q_FILTERS: + bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); + break; + case CHANNEL_TLV_TEARDOWN_Q: + bnx2x_vf_mbx_teardown_q(bp, vf, mbx); + break; + case CHANNEL_TLV_CLOSE: + bnx2x_vf_mbx_close_vf(bp, vf, mbx); + break; + case CHANNEL_TLV_RELEASE: + bnx2x_vf_mbx_release_vf(bp, vf, mbx); + break; + } + + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + BNX2X_ERR("unknown TLV. type %d length %d. 
first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); + for (i = 0; i < 20; i++) + DP_CONT(BNX2X_MSG_IOV, "%x ", + mbx->msg->req.tlv_buf_size.tlv_buffer[i]); + + /* test whether we can respond to the VF (do we have an address + * for it?) + */ + if (vf->state == VF_ACQUIRED) { + /* mbx_resp uses the op_rc of the VF */ + vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; + + /* notify the VF that we do not support this request */ + bnx2x_vf_mbx_resp(bp, vf); + } else { + /* can't send a response since this VF is unknown to us + * just unlock the channel and be done with. + */ + bnx2x_unlock_vf_pf_channel(bp, vf, + mbx->first_tlv.tl.type); + } + } +} + +/* handle new vf-pf message */ +void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) +{ + struct bnx2x_virtf *vf; + struct bnx2x_vf_mbx *mbx; + u8 vf_idx; + int rc; + + DP(BNX2X_MSG_IOV, + "vf pf event received: vfid %d, address_hi %x, address lo %x", + vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo); + /* Sanity checks consider removing later */ + + /* check if the vf_id is valid */ + if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf > + BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", + vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); + goto mbx_done; + } + vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); + mbx = BP_VF_MBX(bp, vf_idx); + + /* verify an event is not currently being processed - + * debug failsafe only + */ + if (mbx->flags & VF_MSG_INPROCESS) { + BNX2X_ERR("Previous message is still being processed, vf_id %d\n", + vfpf_event->vf_id); + goto mbx_done; + } + vf = BP_VF(bp, vf_idx); + + /* save the VF message address */ + mbx->vf_addr_hi = vfpf_event->msg_addr_hi; + mbx->vf_addr_lo = vfpf_event->msg_addr_lo; + DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", + mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + + /* dmae to get the VF request */ + rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid, + mbx->vf_addr_hi, mbx->vf_addr_lo, + sizeof(union vfpf_tlvs)/4); + if (rc) { + BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid); + goto mbx_error; + } + + /* process the VF message header */ + mbx->first_tlv = mbx->msg->req.first_tlv; + + /* dispatch the request (will prepare the response) */ + bnx2x_vf_mbx_request(bp, vf, mbx); + goto mbx_done; + +mbx_error: + bnx2x_vf_release(bp, vf, false); /* non blocking */ +mbx_done: + return; +} + +/* propagate local bulletin board to vf */ +int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf); + dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping + + vf * BULLETIN_CONTENT_SIZE; + dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map); + int rc; + + /* can only update vf after init took place */ + if (bnx2x_vf(bp, vf, state) != VF_ENABLED && + bnx2x_vf(bp, vf, state) != VF_ACQUIRED) + return 0; + + /* increment bulletin board version and compute crc */ + bulletin->version++; + bulletin->length = BULLETIN_CONTENT_SIZE; + bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin); + + /* propagate bulletin board via dmae to vm memory */ + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, + bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr), + U64_LO(vf_addr), bulletin->length / 4); + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h new file mode 100644 index 000000000000..bfc80baec00d --- /dev/null +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -0,0 +1,360 @@ +/* bnx2x_vfpf.h: Broadcom Everest network driver. + * + * Copyright (c) 2011-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Ariel Elior <ariele@broadcom.com> + */ +#ifndef VF_PF_IF_H +#define VF_PF_IF_H + +#ifdef CONFIG_BNX2X_SRIOV + +/* Common definitions for all HVs */ +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; /* No limit so superfluous */ +}; + +struct hw_sb_info { + u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */ + u8 sb_qid; /* used to update DHC for sb */ +}; + +/* HW VF-PF channel definitions + * A.K.A VF-PF mailbox + */ +#define TLV_BUFFER_SIZE 1024 +#define PF_VF_BULLETIN_SIZE 512 + +#define VFPF_QUEUE_FLG_TPA 0x0001 +#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002 +#define VFPF_QUEUE_FLG_TPA_GRO 0x0004 +#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008 +#define VFPF_QUEUE_FLG_STATS 0x0010 +#define VFPF_QUEUE_FLG_OV 0x0020 +#define VFPF_QUEUE_FLG_VLAN 0x0040 +#define VFPF_QUEUE_FLG_COS 0x0080 +#define VFPF_QUEUE_FLG_HC 0x0100 +#define VFPF_QUEUE_FLG_DHC 0x0200 + +#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) +#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) +#define VFPF_QUEUE_DROP_TTL0 (1 << 2) +#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3) + +#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000 +#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001 +#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002 +#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 +#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 +#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 +#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) +#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ +#define BULLETIN_CRC_SEED 0 + +enum { + PFVF_STATUS_WAITING = 0, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE +}; + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate response + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 resp_msg_offset; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_general_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + +/* Acquire */ +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { + /* the following fields are for debug purposes */ + u8 vf_id; /* ME register value */ + u8 vf_os; /* e.g. 
Linux, W2K8 */ + u8 padding[2]; + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + aligned_u64 bulletin_addr; +}; + +/* simple operation request on queue */ +struct vfpf_q_op_tlv { + struct vfpf_first_tlv first_tlv; + u8 vf_qid; + u8 padding[3]; +}; + +/* acquire response tlv - carries the allocated resources */ +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + struct pf_vf_pfdev_info { + u32 chip_num; + u32 pf_cap; +#define PFVF_CAP_RSS 0x00000001 +#define PFVF_CAP_DHC 0x00000002 +#define PFVF_CAP_TPA 0x00000004 + char fw_ver[32]; + u16 db_size; + u8 indices_per_sb; + u8 padding; + } pfdev_info; + struct pf_vf_resc { + /* in case of status NO_RESOURCE in message hdr, pf will fill + * this struct with suggested amount of resources for next + * acquire request + */ +#define PFVF_MAX_QUEUES_PER_VF 16 +#define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 permanent_mac_addr[ETH_ALEN]; + u8 current_mac_addr[ETH_ALEN]; + u8 padding[2]; + } resc; +}; + +/* Init VF */ +struct vfpf_init_tlv { + struct vfpf_first_tlv first_tlv; + aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ + aligned_u64 spq_addr; + aligned_u64 stats_addr; +}; + +/* Setup Queue */ +struct vfpf_setup_q_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_rxq_params { + /* physical addresses */ + aligned_u64 rcq_addr; + aligned_u64 rcq_np_addr; + aligned_u64 rxq_addr; + aligned_u64 sge_addr; + + /* sb + hc info */ + u8 vf_sb; /* index in hw_sbs[] */ + u8 sb_index; /* Index in the SB */ + u16 hc_rate; /* desired interrupts per sec. */ + /* valid iff VFPF_QUEUE_FLG_HC */ + /* rx buffer info */ + u16 mtu; + u16 buf_sz; + u16 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */ + + /* valid iff VFPF_QUEUE_FLG_TPA */ + u16 sge_buf_sz; + u16 tpa_agg_sz; + u8 max_sge_pkt; + + u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs + * all the flags are turned off + */ + + u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */ + u8 padding; + } rxq; + + struct vf_pf_txq_params { + /* physical addresses */ + aligned_u64 txq_addr; + + /* sb + hc info */ + u8 vf_sb; /* index in hw_sbs[] */ + u8 sb_index; /* Index in the SB */ + u16 hc_rate; /* desired interrupts per sec. 
*/ + /* valid iff VFPF_QUEUE_FLG_HC */ + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */ + u8 traffic_type; /* see in setup_context() */ + u8 padding; + } txq; + + u8 vf_qid; /* index in hw_qid[] */ + u8 param_valid; +#define VFPF_RXQ_VALID 0x01 +#define VFPF_TXQ_VALID 0x02 + u8 padding[2]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; +#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 +#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 +#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + u8 mac[ETH_ALEN]; + u16 vlan_tag; +}; + +/* configure queue filters */ +struct vfpf_set_q_filters_tlv { + struct vfpf_first_tlv first_tlv; + + u32 flags; +#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01 +#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02 +#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04 + + u8 vf_qid; /* index in hw_qid[] */ + u8 n_mac_vlan_filters; + u8 n_multicast; + u8 padding; + +#define PFVF_MAX_MAC_FILTERS 16 +#define PFVF_MAX_VLAN_FILTERS 16 +#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\ + PFVF_MAX_VLAN_FILTERS) + struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS]; + +#define PFVF_MAX_MULTICAST_PER_VF 32 + u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN]; + + u32 rx_mask; /* see mask constants at the top of the file */ +}; + +/* close VF (disable VF) */ +struct vfpf_close_tlv { + struct vfpf_first_tlv first_tlv; + u16 vf_id; /* for debug */ + u8 padding[2]; +}; + +/* release the VF's acquired resources */ +struct vfpf_release_tlv { + struct vfpf_first_tlv first_tlv; + u16 vf_id; + u8 padding[2]; +}; + +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; + struct vfpf_init_tlv init; + struct vfpf_close_tlv close; + struct vfpf_q_op_tlv q_op; + struct vfpf_setup_q_tlv setup_q; + struct vfpf_set_q_filters_tlv set_q_filters; + struct vfpf_release_tlv release; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct pfvf_general_resp_tlv general_resp; + struct pfvf_acquire_resp_tlv acquire_resp; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +/* This is a structure which is allocated in the VF, which the PF may update + * when it deems it necessary to do so. The bulletin board is sampled + * periodically by the VF. A copy per VF is maintained in the PF (to prevent + * loss of data upon multiple updates (or the need for read modify write)). 
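The block comment here describes the PF-to-VF bulletin board protocol: the PF bumps the version, recomputes the CRC, and DMAEs a fresh copy into VF memory (see bnx2x_post_vf_bulletin() earlier in this patch), while the VF samples the board periodically and only trusts a copy whose CRC verifies, retrying a bounded number of times (BULLETIN_ATTEMPTS). As a rough illustration of the consumer side, here is a small self-contained C sketch of validating one sampled copy; the generic CRC-32 routine, the 512-byte bound, and the exact byte coverage of the checksum are assumptions for illustration only, not code from this driver.

/* Illustrative only: validate a locally sampled bulletin copy.  The CRC
 * routine below is a generic bit-reflected CRC-32 stand-in and may not match
 * the driver's exact convention.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct bulletin_hdr {			/* mirrors the head of pf_vf_bulletin_content */
	uint32_t crc;			/* assumed to cover the bytes after this field */
	uint16_t version;
	uint16_t length;
};

static uint32_t crc32_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return ~crc;
}

/* Return true if the sampled copy is internally consistent and newer than
 * what was last applied; the caller retries (bounded, like BULLETIN_ATTEMPTS)
 * when false is returned, because the PF may have been mid-update.
 */
static bool bulletin_copy_valid(const void *copy, uint16_t *last_version)
{
	struct bulletin_hdr hdr;
	uint32_t crc;

	memcpy(&hdr, copy, sizeof(hdr));
	if (hdr.length < sizeof(hdr) || hdr.length > 512 /* board size bound */)
		return false;
	crc = crc32_sketch(0 /* seed */,
			   (const uint8_t *)copy + sizeof(hdr.crc),
			   hdr.length - sizeof(hdr.crc));
	if (crc != hdr.crc)
		return false;			/* torn read: PF was rewriting the board */
	if (hdr.version == *last_version)
		return false;			/* nothing new to apply */
	*last_version = hdr.version;
	return true;
}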
+ */ +struct pf_vf_bulletin_size { + u8 size[PF_VF_BULLETIN_SIZE]; +}; + +struct pf_vf_bulletin_content { + u32 crc; /* crc of structure to ensure is not in + * mid-update + */ + u16 version; + u16 length; + + aligned_u64 valid_bitmap; /* bitmap indicating which fields + * hold valid values + */ + +#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address + * is available for it + */ + + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + +union pf_vf_bulletin { + struct pf_vf_bulletin_content content; + struct pf_vf_bulletin_size size; +}; + +#define MAX_TLVS_IN_LIST 50 + +enum channel_tlvs { + CHANNEL_TLV_NONE, + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_INIT, + CHANNEL_TLV_SETUP_Q, + CHANNEL_TLV_SET_Q_FILTERS, + CHANNEL_TLV_TEARDOWN_Q, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_PF_RELEASE_VF, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_FLR, + CHANNEL_TLV_PF_SET_MAC, + CHANNEL_TLV_MAX +}; + +#endif /* CONFIG_BNX2X_SRIOV */ +#endif /* VF_PF_IF_H */ diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index df8c30d1a52c..149a3a038491 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4816,6 +4816,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) return err; } + ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ; + return 0; } @@ -5136,6 +5138,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) if (ret) return ret; + ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ; return 0; } @@ -5387,6 +5390,7 @@ static void cnic_stop_hw(struct cnic_dev *dev) } cnic_shutdown_rings(dev); cp->stop_cm(dev); + cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ; clear_bit(CNIC_F_CNIC_UP, &dev->flags); RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); synchronize_rcu(); @@ -5421,11 +5425,9 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); - cdev = kzalloc(alloc_size , GFP_KERNEL); - if (cdev == NULL) { - netdev_err(dev, "allocate dev struct failure\n"); + cdev = kzalloc(alloc_size, GFP_KERNEL); + if (cdev == NULL) return NULL; - } cdev->netdev = dev; cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 2a35436f9095..0c9367a0f57d 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -179,6 +179,7 @@ struct cnic_eth_dev { #define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004 #define CNIC_DRV_STATE_NO_ISCSI 0x00000008 #define CNIC_DRV_STATE_NO_FCOE 0x00000010 +#define CNIC_DRV_STATE_HANDLES_IRQ 0x00000020 u32 chip_id; u32 max_kwqe_pending; struct pci_dev *pdev; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 3a1c8a3cf7c9..e9b35da375cb 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2385,7 +2385,7 @@ static int sbmac_mii_probe(struct net_device *dev) return -ENXIO; } - phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, + phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index bdb086934cd9..fdb9b5655414 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David 
S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2012 Broadcom Corporation. + * Copyright (C) 2005-2013 Broadcom Corporation. * * Firmware is: * Derived from proprietary unpublished source code, @@ -44,6 +44,7 @@ #include <linux/prefetch.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> +#include <linux/ssb/ssb_driver_gige.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> @@ -93,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 128 +#define TG3_MIN_NUM 130 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "December 03, 2012" +#define DRV_MODULE_RELDATE "February 14, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -263,6 +264,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { TG3_DRV_DATA_FLAG_5705_10_100}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F), @@ -330,6 +332,10 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -570,7 +576,9 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) { tp->write32_mbox(tp, off, val); - if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) + if (tg3_flag(tp, FLUSH_POSTED_WRITES) || + (!tg3_flag(tp, MBOX_WRITE_REORDER) && + !tg3_flag(tp, ICH_WORKAROUND))) tp->read32_mbox(tp, off); } @@ -580,7 +588,8 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) writel(val, mbox); if (tg3_flag(tp, TXD_MBOX_HWBUG)) writel(val, mbox); - if (tg3_flag(tp, MBOX_WRITE_REORDER)) + if (tg3_flag(tp, MBOX_WRITE_REORDER) || + tg3_flag(tp, FLUSH_POSTED_WRITES)) readl(mbox); } @@ -609,7 +618,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + if (tg3_asic_rev(tp) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) return; @@ -634,7 +643,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) { unsigned long flags; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + if (tg3_asic_rev(tp) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { *val = 0; return; @@ -662,7 +671,7 @@ static void tg3_ape_lock_init(struct tg3 *tp) int i; u32 regbase, bit; - if 
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if (tg3_asic_rev(tp) == ASIC_REV_5761) regbase = TG3_APE_LOCK_GRANT; else regbase = TG3_APE_PER_LOCK_GRANT; @@ -698,7 +707,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) switch (locknum) { case TG3_APE_LOCK_GPIO: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: @@ -717,7 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) return -EINVAL; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + if (tg3_asic_rev(tp) == ASIC_REV_5761) { req = TG3_APE_LOCK_REQ; gnt = TG3_APE_LOCK_GRANT; } else { @@ -755,7 +764,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) switch (locknum) { case TG3_APE_LOCK_GPIO: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if (tg3_asic_rev(tp) == ASIC_REV_5761) return; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: @@ -774,7 +783,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) return; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if (tg3_asic_rev(tp) == ASIC_REV_5761) gnt = TG3_APE_LOCK_GRANT; else gnt = TG3_APE_PER_LOCK_GRANT; @@ -1088,7 +1097,8 @@ static void tg3_switch_clocks(struct tg3 *tp) #define PHY_BUSY_LOOPS 5000 -static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) +static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg, + u32 *val) { u32 frame_val; unsigned int loops; @@ -1104,7 +1114,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) *val = 0x0; - frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK); @@ -1141,7 +1151,13 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) return ret; } -static int tg3_writephy(struct tg3 *tp, int reg, u32 val) +static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) +{ + return __tg3_readphy(tp, tp->phy_addr, reg, val); +} + +static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg, + u32 val) { u32 frame_val; unsigned int loops; @@ -1159,7 +1175,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) tg3_ape_lock(tp, tp->phy_ape_lock); - frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK); @@ -1194,6 +1210,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) return ret; } +static int tg3_writephy(struct tg3 *tp, int reg, u32 val) +{ + return __tg3_writephy(tp, tp->phy_addr, reg, val); +} + static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) { int err; @@ -1458,7 +1479,7 @@ static void tg3_mdio_start(struct tg3 *tp) udelay(80); if (tg3_flag(tp, MDIOBUS_INITED) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_asic_rev(tp) == ASIC_REV_5785) tg3_mdio_config_5785(tp); } @@ -1473,7 +1494,7 @@ static int tg3_mdio_init(struct tg3 *tp) tp->phy_addr = tp->pci_fn + 1; - if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; else is_serdes = tr32(TG3_CPMU_PHY_STRAP) & @@ -1561,7 +1582,7 @@ static int tg3_mdio_init(struct tg3 *tp) tg3_flag_set(tp, MDIOBUS_INITED); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + if (tg3_asic_rev(tp) == ASIC_REV_5785) tg3_mdio_config_5785(tp); return 0; @@ 
-1778,7 +1799,12 @@ static int tg3_poll_fw(struct tg3 *tp) int i; u32 val; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_flag(tp, IS_SSB_CORE)) { + /* We don't use firmware. */ + return 0; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { /* Wait up to 20ms for init done. */ for (i = 0; i < 200; i++) { if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) @@ -1807,7 +1833,7 @@ static int tg3_poll_fw(struct tg3 *tp) netdev_info(tp->dev, "No firmware running\n"); } - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { /* The 57765 A0 needs a little more * time to do some important work. */ @@ -1937,7 +1963,7 @@ static void tg3_adjust_link(struct net_device *dev) if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) mac_mode |= MAC_MODE_PORT_MODE_MII; else if (phydev->speed == SPEED_1000 || - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) + tg3_asic_rev(tp) != ASIC_REV_5785) mac_mode |= MAC_MODE_PORT_MODE_GMII; else mac_mode |= MAC_MODE_PORT_MODE_MII; @@ -1964,7 +1990,7 @@ static void tg3_adjust_link(struct net_device *dev) udelay(40); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + if (tg3_asic_rev(tp) == ASIC_REV_5785) { if (phydev->speed == SPEED_10) tw32(MAC_MI_STAT, MAC_MI_STAT_10MBPS_MODE | @@ -2013,8 +2039,8 @@ static int tg3_phy_init(struct tg3 *tp) phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; /* Attach the MAC to the PHY. */ - phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, - phydev->dev_flags, phydev->interface); + phydev = phy_connect(tp->dev, dev_name(&phydev->dev), + tg3_adjust_link, phydev->interface); if (IS_ERR(phydev)) { dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); return PTR_ERR(phydev); @@ -2156,7 +2182,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) MII_TG3_MISC_SHDW_SCR5_DLPTLM | MII_TG3_MISC_SHDW_SCR5_SDTL | MII_TG3_MISC_SHDW_SCR5_C125OE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) + if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); @@ -2311,8 +2337,8 @@ static void tg3_phy_eee_enable(struct tg3 *tp) u32 val; if (tp->link_config.active_speed == SPEED_1000 && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || tg3_flag(tp, 57765_CLASS)) && !tg3_phy_toggle_auxctl_smdsp(tp, true)) { val = MII_TG3_DSP_TAP26_ALNOKO | @@ -2516,7 +2542,7 @@ static int tg3_phy_reset(struct tg3 *tp) u32 val, cpmuctrl; int err; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { val = tr32(GRC_MISC_CFG); tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); udelay(40); @@ -2531,9 +2557,9 @@ static int tg3_phy_reset(struct tg3 *tp) tg3_link_report(tp); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_asic_rev(tp) == ASIC_REV_5705) { err = tg3_phy_reset_5703_4_5(tp); if (err) return err; @@ -2541,8 +2567,8 @@ static int tg3_phy_reset(struct tg3 *tp) } cpmuctrl = 0; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + if (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != 
CHIPREV_5784_AX) { cpmuctrl = tr32(TG3_CPMU_CTRL); if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) tw32(TG3_CPMU_CTRL, @@ -2560,8 +2586,8 @@ static int tg3_phy_reset(struct tg3 *tp) tw32(TG3_CPMU_CTRL, cpmuctrl); } - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + if (tg3_chip_rev(tp) == CHIPREV_5784_AX || + tg3_chip_rev(tp) == CHIPREV_5761_AX) { val = tr32(TG3_CPMU_LSPD_1000MB_CLK); if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == CPMU_LSPD_1000MB_MACCLK_12_5) { @@ -2639,11 +2665,14 @@ out: val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { /* adjust output voltage */ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); } + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) + tg3_phydsp_write(tp, 0xffb, 0x4000); + tg3_phy_toggle_automdix(tp, 1); tg3_phy_set_wirespeed(tp); return 0; @@ -2669,8 +2698,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) { u32 status, shift; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719) status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); else status = tr32(TG3_CPMU_DRV_STATUS); @@ -2679,8 +2708,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) status &= ~(TG3_GPIO_MSG_MASK << shift); status |= (newstat << shift); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719) tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); else tw32(TG3_CPMU_DRV_STATUS, status); @@ -2693,9 +2722,9 @@ static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) if (!tg3_flag(tp, IS_NIC)) return 0; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) return -EIO; @@ -2718,8 +2747,8 @@ static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) u32 grc_local_ctrl; if (!tg3_flag(tp, IS_NIC) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) + tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) return; grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; @@ -2742,8 +2771,8 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) if (!tg3_flag(tp, IS_NIC)) return; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | (GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | @@ -2775,7 +2804,7 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) u32 grc_local_ctrl = 0; /* Workaround to prevent overdrawing Amps. 
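Most of the tg3.c churn in this patch is a mechanical conversion from GET_ASIC_REV(tp->pci_chip_rev_id), GET_CHIP_REV(tp->pci_chip_rev_id), and raw tp->pci_chip_rev_id comparisons to the tg3_asic_rev(), tg3_chip_rev(), and tg3_chip_rev_id() accessors. The sketch below only illustrates the accessor pattern behind that change; the shift amounts, the struct layout, and the sample value are assumptions and are not taken from tg3.h.

#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for struct tg3: just the field the accessors read. */
struct tg3_sketch {
	uint32_t pci_chip_rev_id;
};

static inline uint32_t tg3_chip_rev_id(const struct tg3_sketch *tp)
{
	return tp->pci_chip_rev_id;		/* full chip revision id */
}

static inline uint32_t tg3_asic_rev(const struct tg3_sketch *tp)
{
	return tp->pci_chip_rev_id >> 12;	/* GET_ASIC_REV-style field (assumed shift) */
}

static inline uint32_t tg3_chip_rev(const struct tg3_sketch *tp)
{
	return tp->pci_chip_rev_id >> 8;	/* GET_CHIP_REV-style field (assumed shift) */
}

int main(void)
{
	struct tg3_sketch tp = { .pci_chip_rev_id = 0x12345001 };	/* made-up value */

	/* Call sites then compare tg3_asic_rev(&tp) == SOME_ASIC_REV instead of
	 * open-coding the extraction macro on tp.pci_chip_rev_id at every test.
	 */
	printf("chip_rev_id=%#x asic_rev=%#x chip_rev=%#x\n",
	       tg3_chip_rev_id(&tp), tg3_asic_rev(&tp), tg3_chip_rev(&tp));
	return 0;
}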
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tg3_asic_rev(tp) == ASIC_REV_5714) { grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | grc_local_ctrl, @@ -2847,9 +2876,9 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) return; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { tg3_frob_aux_power_5717(tp, include_wol ? tg3_flag(tp, WOL_ENABLE) != 0 : 0); return; @@ -2901,7 +2930,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) u32 val; if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + if (tg3_asic_rev(tp) == ASIC_REV_5704) { u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); u32 serdes_cfg = tr32(MAC_SERDES_CFG); @@ -2913,7 +2942,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) return; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { tg3_bmcr_reset(tp); val = tr32(GRC_MISC_CFG); tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); @@ -2952,16 +2981,16 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) /* The PHY should not be powered down on some chips because * of bugs. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5704 || + (tg3_asic_rev(tp) == ASIC_REV_5780 && (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && + (tg3_asic_rev(tp) == ASIC_REV_5717 && !tp->pci_fn)) return; - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + if (tg3_chip_rev(tp) == CHIPREV_5784_AX || + tg3_chip_rev(tp) == CHIPREV_5761_AX) { val = tr32(TG3_CPMU_LSPD_1000MB_CLK); val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; val |= CPMU_LSPD_1000MB_MACCLK_12_5; @@ -3344,7 +3373,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, !tg3_flag(tp, 57765_PLUS)) tw32(NVRAM_ADDR, phy_addr); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + if (tg3_asic_rev(tp) != ASIC_REV_5752 && !tg3_flag(tp, 5755_PLUS) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -3429,7 +3458,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { u32 val = tr32(GRC_VCPU_EXT_CTRL); tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); @@ -3447,6 +3476,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) tw32_f(offset + CPU_MODE, CPU_MODE_HALT); udelay(10); } else { + /* + * There is only an Rx CPU for the 5750 derivative in the + * BCM4785. 
+ */ + if (tg3_flag(tp, IS_SSB_CORE)) + return 0; + for (i = 0; i < 10000; i++) { tw32(offset + CPU_STATE, 0xffffffff); tw32(offset + CPU_MODE, CPU_MODE_HALT); @@ -3600,7 +3636,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp) info.fw_len = tp->fw->size - 12; info.fw_data = &fw_data[3]; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tg3_asic_rev(tp) == ASIC_REV_5705) { cpu_base = RX_CPU_BASE; cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; } else { @@ -3658,8 +3694,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704) { for (i = 0; i < 12; i++) { tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); @@ -3778,7 +3814,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) tg3_setup_phy(tp, 0); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { u32 val; val = tr32(GRC_VCPU_EXT_CTRL); @@ -3820,8 +3856,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) mac_mode = MAC_MODE_PORT_MODE_MII; mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5700) { + if (tg3_asic_rev(tp) == ASIC_REV_5700) { u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? SPEED_100 : SPEED_10; if (tg3_5700_link_polarity(tp, speed)) @@ -3854,8 +3889,8 @@ static int tg3_power_down_prepare(struct tg3 *tp) } if (!tg3_flag(tp, WOL_SPEED_100MB) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701)) { u32 base_val; base_val = tp->pci_clock_ctrl; @@ -3866,13 +3901,13 @@ static int tg3_power_down_prepare(struct tg3 *tp) CLOCK_CTRL_PWRDOWN_PLL133, 40); } else if (tg3_flag(tp, 5780_CLASS) || tg3_flag(tp, CPMU_PRESENT) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_asic_rev(tp) == ASIC_REV_5906) { /* do nothing */ } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { u32 newbits1, newbits2; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | CLOCK_CTRL_TXCLK_DISABLE | CLOCK_CTRL_ALTCLK); @@ -3894,8 +3929,8 @@ static int tg3_power_down_prepare(struct tg3 *tp) if (!tg3_flag(tp, 5705_PLUS)) { u32 newbits3; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | CLOCK_CTRL_TXCLK_DISABLE | CLOCK_CTRL_44MHZ_CORE); @@ -3914,8 +3949,9 @@ static int tg3_power_down_prepare(struct tg3 *tp) tg3_frob_aux_power(tp, true); /* Workaround for unstable PLL clock */ - if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || - (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { + if ((!tg3_flag(tp, IS_SSB_CORE)) && + ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || + (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { u32 val = tr32(0x7d00); val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); @@ -4006,8 +4042,8 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) if (!(tp->phy_flags & 
TG3_PHYFLG_10_100_ONLY)) { new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; err = tg3_writephy(tp, MII_CTRL1000, new_adv); @@ -4036,7 +4072,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) if (err) val = 0; - switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + switch (tg3_asic_rev(tp)) { case ASIC_REV_5717: case ASIC_REV_57765: case ASIC_REV_57766: @@ -4049,6 +4085,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); /* Fall through */ case ASIC_REV_5720: + case ASIC_REV_5762: if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | MII_TG3_DSP_CH34TP2_HIBW01); @@ -4183,8 +4220,8 @@ static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) return false; if (tgtadv && - (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) { + (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); @@ -4268,9 +4305,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) /* Some third-party PHYs need to be reset on link going * down. */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + if ((tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_asic_rev(tp) == ASIC_REV_5705) && tp->link_up) { tg3_readphy(tp, MII_BMSR, &bmsr); if (!tg3_readphy(tp, MII_BMSR, &bmsr) && @@ -4312,8 +4349,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) return err; } } - } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { + } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { /* 5701 {A0,B0} CRC bug workaround */ tg3_writephy(tp, 0x15, 0x0a75); tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); @@ -4330,8 +4367,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) tg3_writephy(tp, MII_TG3_IMASK, ~0); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_LNK3_LED_MODE); @@ -4435,6 +4472,15 @@ relink: if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { tg3_phy_copper_begin(tp); + if (tg3_flag(tp, ROBOSWITCH)) { + current_link_up = 1; + /* FIXME: when BCM5325 switch is used use 100 MBit/s */ + current_speed = SPEED_1000; + current_duplex = DUPLEX_FULL; + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + } + tg3_readphy(tp, MII_BMSR, &bmsr); if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) @@ -4453,11 +4499,31 @@ relink: else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + /* In order for the 5750 core in BCM4785 chip 
to work properly + * in RGMII mode, the Led Control Register must be set up. + */ + if (tg3_flag(tp, RGMII_MODE)) { + u32 led_ctrl = tr32(MAC_LED_CTRL); + led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); + + if (tp->link_config.active_speed == SPEED_10) + led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; + else if (tp->link_config.active_speed == SPEED_100) + led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_100MBPS_ON); + else if (tp->link_config.active_speed == SPEED_1000) + led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON); + + tw32(MAC_LED_CTRL, led_ctrl); + udelay(40); + } + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; if (tp->link_config.active_duplex == DUPLEX_HALF) tp->mac_mode |= MAC_MODE_HALF_DUPLEX; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if (tg3_asic_rev(tp) == ASIC_REV_5700) { if (current_link_up == 1 && tg3_5700_link_polarity(tp, tp->link_config.active_speed)) tp->mac_mode |= MAC_MODE_LINK_POLARITY; @@ -4469,7 +4535,7 @@ relink: * ??? send/receive packets... */ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && - tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { + tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); @@ -4488,7 +4554,7 @@ relink: } udelay(40); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && + if (tg3_asic_rev(tp) == ASIC_REV_5700 && current_link_up == 1 && tp->link_config.active_speed == SPEED_1000 && (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { @@ -4943,8 +5009,8 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) port_a = 1; current_link_up = 0; - if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && - tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { workaround = 1; if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) port_a = 0; @@ -5273,7 +5339,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tg3_asic_rev(tp) == ASIC_REV_5714) { if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) bmsr |= BMSR_LSTATUS; else @@ -5342,8 +5408,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) bmcr = new_bmcr; err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5714) { + if (tg3_asic_rev(tp) == ASIC_REV_5714) { if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) bmsr |= BMSR_LSTATUS; else @@ -5478,7 +5543,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) else err = tg3_setup_copper_phy(tp, force_reset); - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { u32 scale; val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; @@ -5496,7 +5561,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | (6 << TX_LENGTHS_IPG_SHIFT); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) val |= tr32(MAC_TX_LENGTHS) & (TX_LENGTHS_JMB_FRM_LEN_MSK | TX_LENGTHS_CNT_DWN_VAL_MSK); @@ -5785,10 +5851,8 @@ static void tg3_dump_state(struct tg3 *tp) u32 *regs; regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); - if (!regs) { - netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + if 
(!regs) return; - } if (tg3_flag(tp, PCI_EXPRESS)) { /* Read up to but not including private PCI registers */ @@ -7122,7 +7186,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, dma_addr_t new_addr = 0; int ret = 0; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + if (tg3_asic_rev(tp) != ASIC_REV_5701) new_skb = skb_copy(skb, GFP_ATOMIC); else { int more_headroom = 4 - ((unsigned long)skb->data & 3); @@ -7296,7 +7360,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } else if (tg3_flag(tp, HW_TSO_2)) mss |= hdr_len << 9; else if (tg3_flag(tp, HW_TSO_1) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tg3_asic_rev(tp) == ASIC_REV_5705) { if (tcp_opt_len || iph->ihl > 5) { int tsflags; @@ -7452,7 +7516,7 @@ static void tg3_mac_loopback(struct tg3 *tp, bool enable) if (tg3_flag(tp, 5705_PLUS) || (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tg3_asic_rev(tp) == ASIC_REV_5700) tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; } @@ -7511,7 +7575,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) udelay(40); if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + tg3_asic_rev(tp) == ASIC_REV_5785) { tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | MII_TG3_FET_PTEST_FRC_TX_LINK | MII_TG3_FET_PTEST_FRC_TX_LOCK); @@ -7535,7 +7599,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) else mac_mode |= MAC_MODE_PORT_MODE_MII; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if (tg3_asic_rev(tp) == ASIC_REV_5700) { u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; if (masked_phy_id == TG3_PHY_ID_BCM5401) @@ -8213,7 +8277,7 @@ static void tg3_restore_pci_state(struct tg3 *tp) /* Set MAX PCI retry to zero. */ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && tg3_flag(tp, PCIX_MODE)) val |= PCISTATE_RETRY_SAME_DMA; /* Allow reads and writes to the APE register and memory space. 
*/ @@ -8285,7 +8349,7 @@ static int tg3_chip_reset(struct tg3 *tp) */ tg3_save_pci_state(tp); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + if (tg3_asic_rev(tp) == ASIC_REV_5752 || tg3_flag(tp, 5755_PLUS)) tw32(GRC_FASTBOOT_PC, 0); @@ -8320,7 +8384,7 @@ static int tg3_chip_reset(struct tg3 *tp) for (i = 0; i < tp->irq_cnt; i++) synchronize_irq(tp->napi[i].irq_vec); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tg3_asic_rev(tp) == ASIC_REV_57780) { val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); } @@ -8330,19 +8394,19 @@ static int tg3_chip_reset(struct tg3 *tp) if (tg3_flag(tp, PCI_EXPRESS)) { /* Force PCIe 1.0a mode */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + if (tg3_asic_rev(tp) != ASIC_REV_5785 && !tg3_flag(tp, 57765_PLUS) && tr32(TG3_PCIE_PHY_TSTCTL) == (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); - if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { tw32(GRC_MISC_CFG, (1 << 29)); val |= (1 << 29); } } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); tw32(GRC_VCPU_EXT_CTRL, tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); @@ -8385,7 +8449,7 @@ static int tg3_chip_reset(struct tg3 *tp) if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { u16 val16; - if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { int j; u32 cfg_val; @@ -8426,23 +8490,33 @@ static int tg3_chip_reset(struct tg3 *tp) val = tr32(MEMARB_MODE); tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); - if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { tg3_stop_fw(tp); tw32(0x5000, 0x400); } + if (tg3_flag(tp, IS_SSB_CORE)) { + /* + * BCM4785: In order to avoid repercussions from using + * potentially defective internal ROM, stop the Rx RISC CPU, + * which is not required. 
+ */ + tg3_stop_fw(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + } + tw32(GRC_MODE, tp->grc_mode); - if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { val = tr32(0xc4); tw32(0xc4, val | (1 << 15)); } if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tg3_asic_rev(tp) == ASIC_REV_5705) { tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; - if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); } @@ -8468,15 +8542,15 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_mdio_start(tp); if (tg3_flag(tp, PCI_EXPRESS) && - tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && + tg3_asic_rev(tp) != ASIC_REV_5785 && !tg3_flag(tp, 57765_PLUS)) { val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_asic_rev(tp) == ASIC_REV_5720) { val = tr32(TG3_CPMU_CLCK_ORIDE); tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); } @@ -8687,7 +8761,8 @@ static void tg3_rings_reset(struct tg3 *tp) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; else if (tg3_flag(tp, 5717_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; - else if (tg3_flag(tp, 57765_CLASS)) + else if (tg3_flag(tp, 57765_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5762) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; else limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; @@ -8703,7 +8778,8 @@ static void tg3_rings_reset(struct tg3 *tp) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; else if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + else if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5762 || tg3_flag(tp, 57765_CLASS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; else @@ -8809,12 +8885,12 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) if (!tg3_flag(tp, 5750_PLUS) || tg3_flag(tp, 5780_CLASS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + tg3_asic_rev(tp) == ASIC_REV_5750 || + tg3_asic_rev(tp) == ASIC_REV_5752 || tg3_flag(tp, 57765_PLUS)) bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + else if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5787) bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; else bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; @@ -8994,9 +9070,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Enable MAC control of LPI */ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { - tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, - TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | - TG3_CPMU_EEE_LNKIDL_UART_IDL); + val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) + val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; + + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); tw32_f(TG3_CPMU_EEE_CTRL, TG3_CPMU_EEE_CTRL_EXIT_20_1_US); @@ -9006,7 +9085,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) TG3_CPMU_EEEMD_LPI_IN_RX | TG3_CPMU_EEEMD_EEE_ENABLE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + if (tg3_asic_rev(tp) != 
ASIC_REV_5717) val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; if (tg3_flag(tp, ENABLE_APE)) @@ -9032,7 +9111,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tg3_write_sig_legacy(tp, RESET_KIND_INIT); - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { val = tr32(TG3_CPMU_CTRL); val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); tw32(TG3_CPMU_CTRL, val); @@ -9053,7 +9132,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(TG3_CPMU_HST_ACC, val); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tg3_asic_rev(tp) == ASIC_REV_57780) { val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | PCIE_PWR_MGMT_L1_THRESH_4MS; @@ -9083,7 +9162,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } if (tg3_flag(tp, 57765_CLASS)) { - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { u32 grc_mode = tr32(GRC_MODE); /* Access the lower 1K of PL PCIE block registers. */ @@ -9098,8 +9177,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_MODE, grc_mode); } - if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { - u32 grc_mode = tr32(GRC_MODE); + if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { + u32 grc_mode; + + /* Fix transmit hangs */ + val = tr32(TG3_CPMU_PADRNG_CTL); + val |= TG3_CPMU_PADRNG_CTL_RDIV2; + tw32(TG3_CPMU_PADRNG_CTL, val); + + grc_mode = tr32(GRC_MODE); /* Access the lower 1K of DL PCIE block registers. */ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; @@ -9131,7 +9217,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); } - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && tg3_flag(tp, PCIX_MODE)) { val = tr32(TG3PCI_PCISTATE); val |= PCISTATE_RETRY_SAME_DMA; @@ -9149,7 +9235,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(TG3PCI_PCISTATE, val); } - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { + if (tg3_chip_rev(tp) == CHIPREV_5704_BX) { /* Enable some hw fixes. */ val = tr32(TG3PCI_MSI_DATA); val |= (1 << 26) | (1 << 28) | (1 << 29); @@ -9168,14 +9254,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, 57765_PLUS)) { val = tr32(TG3PCI_DMA_RW_CTRL) & ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; if (!tg3_flag(tp, 57765_CLASS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + tg3_asic_rev(tp) != ASIC_REV_5717 && + tg3_asic_rev(tp) != ASIC_REV_5762) val |= DMA_RWCTRL_TAGGED_STAT_WA; tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + } else if (tg3_asic_rev(tp) != ASIC_REV_5784 && + tg3_asic_rev(tp) != ASIC_REV_5761) { /* This value is determined during the probe time DMA * engine test, tg3_test_dma. */ @@ -9215,9 +9302,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Initialize MBUF/DESC pool. */ if (tg3_flag(tp, 5750_PLUS)) { /* Do nothing. 
*/ - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { + } else if (tg3_asic_rev(tp) != ASIC_REV_5705) { tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + if (tg3_asic_rev(tp) == ASIC_REV_5704) tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); else tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); @@ -9255,11 +9342,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->bufmgr_config.dma_high_water); val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + if (tg3_asic_rev(tp) == ASIC_REV_5719) val |= BUFMGR_MODE_NO_TX_UNDERRUN; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; tw32(BUFMGR_MODE, val); for (i = 0; i < 2000; i++) { @@ -9272,7 +9359,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) return -ENODEV; } - if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1) tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); tg3_setup_rxbd_thresholds(tp); @@ -9310,7 +9397,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Program the jumbo buffer descriptor ring control * blocks on those devices that have them. */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { if (tg3_flag(tp, JUMBO_RING_ENABLE)) { @@ -9323,7 +9410,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, val | BDINFO_FLAGS_USE_EXT_RECV); if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || - tg3_flag(tp, 57765_CLASS)) + tg3_flag(tp, 57765_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5762) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { @@ -9365,7 +9453,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) (6 << TX_LENGTHS_IPG_SHIFT) | (32 << TX_LENGTHS_SLOT_TIME_SHIFT); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) val |= tr32(MAC_TX_LENGTHS) & (TX_LENGTHS_JMB_FRM_LEN_MSK | TX_LENGTHS_CNT_DWN_VAL_MSK); @@ -9385,20 +9474,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | RDMAC_MODE_LNGREAD_ENAB); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + if (tg3_asic_rev(tp) == ASIC_REV_5717) rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + if (tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | RDMAC_MODE_MBUF_RBD_CRPT_ENAB | RDMAC_MODE_MBUF_SBD_CRPT_ENAB; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_asic_rev(tp) == ASIC_REV_5705 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { if (tg3_flag(tp, TSO_CAPABLE) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tg3_asic_rev(tp) == ASIC_REV_5705) { rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; } else if 
(!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && !tg3_flag(tp, IS_5788)) { @@ -9409,26 +9498,43 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, PCI_EXPRESS)) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + tp->dma_limit = 0; + if (tp->dev->mtu <= ETH_DATA_LEN) { + rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; + tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; + } + } + if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; if (tg3_flag(tp, 57765_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + if (tg3_asic_rev(tp) == ASIC_REV_5761 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780 || tg3_flag(tp, 57765_PLUS)) { - val = tr32(TG3_RDMA_RSRVCTRL_REG); - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) { + u32 tgtreg; + + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tgtreg = TG3_RDMA_RSRVCTRL_REG2; + else + tgtreg = TG3_RDMA_RSRVCTRL_REG; + + val = tr32(tgtreg); + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || + tg3_asic_rev(tp) == ASIC_REV_5762) { val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); @@ -9436,14 +9542,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; } - tw32(TG3_RDMA_RSRVCTRL_REG, - val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); - tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) { + u32 tgtreg; + + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; + else + tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; + + val = tr32(tgtreg); + tw32(tgtreg, val | TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); } @@ -9520,7 +9633,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; if (!tg3_flag(tp, 5705_PLUS) && !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_asic_rev(tp) != ASIC_REV_5700) tp->mac_mode |= MAC_MODE_LINK_POLARITY; tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); udelay(40); @@ -9538,11 +9651,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + if (tg3_asic_rev(tp) == ASIC_REV_5752) gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | GRC_LCLCTRL_GPIO_OUTPUT3; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == 
ASIC_REV_5755) + if (tg3_asic_rev(tp) == ASIC_REV_5755) gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; tp->grc_local_ctrl &= ~gpio_mask; @@ -9577,11 +9690,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | WDMAC_MODE_LNGREAD_ENAB); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_asic_rev(tp) == ASIC_REV_5705 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { if (tg3_flag(tp, TSO_CAPABLE) && - (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || - tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { + (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { /* nothing */ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && !tg3_flag(tp, IS_5788)) { @@ -9593,7 +9706,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, 5755_PLUS)) val |= WDMAC_MODE_STATUS_TAG_FIX; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + if (tg3_asic_rev(tp) == ASIC_REV_5785) val |= WDMAC_MODE_BURST_ALL_DATA; tw32_f(WDMAC_MODE, val); @@ -9604,10 +9717,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, &pcix_cmd); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { + if (tg3_asic_rev(tp) == ASIC_REV_5703) { pcix_cmd &= ~PCI_X_CMD_MAX_READ; pcix_cmd |= PCI_X_CMD_READ_2K; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); pcix_cmd |= PCI_X_CMD_READ_2K; } @@ -9618,7 +9731,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(RDMAC_MODE, rdmac_mode); udelay(40); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + if (tg3_asic_rev(tp) == ASIC_REV_5719) { for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) break; @@ -9635,7 +9748,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (!tg3_flag(tp, 5705_PLUS)) tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + if (tg3_asic_rev(tp) == ASIC_REV_5761) tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); else @@ -9658,7 +9771,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(SNDBDI_MODE, val); tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { err = tg3_load_5701_a0_firmware_fix(tp); if (err) return err; @@ -9673,10 +9786,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->tx_mode = TX_MODE_ENABLE; if (tg3_flag(tp, 5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_asic_rev(tp) == ASIC_REV_5906) tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) { val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; tp->tx_mode &= ~val; tp->tx_mode |= tr32(MAC_TX_MODE) & val; @@ -9727,8 +9841,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(10); if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && - !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { + if ((tg3_asic_rev(tp) == ASIC_REV_5704) && + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { /* Set drive transmission level to 1.2V */ /* only if the signal 
pre-emphasis bit is not set */ val = tr32(MAC_SERDES_CFG); @@ -9736,7 +9850,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) val |= 0x880; tw32(MAC_SERDES_CFG, val); } - if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) tw32(MAC_SERDES_CFG, 0x616000); } @@ -9749,14 +9863,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) val = 2; tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && + if (tg3_asic_rev(tp) == ASIC_REV_5704 && (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { /* Use hardware link auto-negotiation */ tg3_flag_set(tp, HW_AUTONEG); } if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + tg3_asic_rev(tp) == ASIC_REV_5714) { u32 tmp; tmp = tr32(SERDES_RX_CTRL); @@ -10010,9 +10124,9 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && - tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { + if (tg3_asic_rev(tp) != ASIC_REV_5717 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); } else { u32 val = tr32(HOSTCC_FLOW_ATTN); @@ -10060,10 +10174,15 @@ static void tg3_timer(unsigned long __opaque) spin_lock(&tp->lock); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + if (tg3_asic_rev(tp) == ASIC_REV_5717 || tg3_flag(tp, 57765_CLASS)) tg3_chk_missed_msi(tp); + if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { + /* BCM4785: Flush posted writes from GbE to host memory. 
*/ + tr32(HOSTCC_MODE); + } + if (!tg3_flag(tp, TAGGED_STATUS)) { /* All of this garbage is because when using non-tagged * IRQ status the mailbox/status_block protocol the chip @@ -10181,7 +10300,7 @@ restart_timer: static void tg3_timer_init(struct tg3 *tp) { if (tg3_flag(tp, TAGGED_STATUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + tg3_asic_rev(tp) != ASIC_REV_5717 && !tg3_flag(tp, 57765_CLASS)) tp->timer_offset = HZ; else @@ -10762,7 +10881,7 @@ static int tg3_open(struct net_device *dev) if (tp->fw_needed) { err = tg3_request_firmware(tp); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { if (err) return err; } else if (err) { @@ -10832,8 +10951,8 @@ static u64 tg3_calc_crc_errors(struct tg3 *tp) struct tg3_hw_stats *hw_stats = tp->hw_stats; if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701)) { u32 val; if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { @@ -12357,11 +12476,12 @@ static int tg3_test_memory(struct tg3 *tp) if (tg3_flag(tp, 5717_PLUS)) mem_tbl = mem_tbl_5717; - else if (tg3_flag(tp, 57765_CLASS)) + else if (tg3_flag(tp, 57765_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5762) mem_tbl = mem_tbl_57765; else if (tg3_flag(tp, 5755_PLUS)) mem_tbl = mem_tbl_5755; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + else if (tg3_asic_rev(tp) == ASIC_REV_5906) mem_tbl = mem_tbl_5906; else if (tg3_flag(tp, 5705_PLUS)) mem_tbl = mem_tbl_5705; @@ -12473,7 +12593,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) } else if (tg3_flag(tp, HW_TSO_2)) mss |= hdr_len << 9; else if (tg3_flag(tp, HW_TSO_1) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tg3_asic_rev(tp) == ASIC_REV_5705) { mss |= (TG3_TSO_TCP_OPT_LEN << 9); } else { base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); @@ -12659,7 +12779,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) * errata. Also, the MAC loopback test is deprecated for * all newer ASIC revisions. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + if (tg3_asic_rev(tp) != ASIC_REV_5780 && !tg3_flag(tp, CPMU_PRESENT)) { tg3_mac_loopback(tp, true); @@ -12937,7 +13057,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EAGAIN; spin_lock_bh(&tp->lock); - err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); + err = __tg3_readphy(tp, data->phy_id & 0x1f, + data->reg_num & 0x1f, &mii_regval); spin_unlock_bh(&tp->lock); data->val_out = mii_regval; @@ -12953,7 +13074,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EAGAIN; spin_lock_bh(&tp->lock); - err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); + err = __tg3_writephy(tp, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); spin_unlock_bh(&tp->lock); return err; @@ -13144,7 +13266,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) /* Reset PHY, otherwise the read DMA engine will be in a mode that * breaks all requests to 256 bytes. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + if (tg3_asic_rev(tp) == ASIC_REV_57766) reset_phy = 1; err = tg3_restart_hw(tp, reset_phy); @@ -13257,7 +13379,7 @@ static void tg3_get_nvram_info(struct tg3 *tp) tw32(NVRAM_CFG1, nvcfg1); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + if (tg3_asic_rev(tp) == ASIC_REV_5750 || tg3_flag(tp, 5780_CLASS)) { switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: @@ -13698,6 +13820,22 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp) nvcfg1 = tr32(NVRAM_CFG1); nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { + tg3_flag_set(tp, NO_NVRAM); + return; + } + + switch (nvmpinstrp) { + case FLASH_5762_EEPROM_HD: + nvmpinstrp = FLASH_5720_EEPROM_HD; + break; + case FLASH_5762_EEPROM_LD: + nvmpinstrp = FLASH_5720_EEPROM_LD; + break; + } + } + switch (nvmpinstrp) { case FLASH_5720_EEPROM_HD: case FLASH_5720_EEPROM_LD: @@ -13743,7 +13881,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp) tp->nvram_size = TG3_NVRAM_SIZE_1MB; break; default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; + if (tg3_asic_rev(tp) != ASIC_REV_5762) + tp->nvram_size = TG3_NVRAM_SIZE_128KB; break; } break; @@ -13789,7 +13928,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp) tp->nvram_size = TG3_NVRAM_SIZE_1MB; break; default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; + if (tg3_asic_rev(tp) != ASIC_REV_5762) + tp->nvram_size = TG3_NVRAM_SIZE_128KB; break; } break; @@ -13801,11 +13941,30 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp) tg3_nvram_get_pagesize(tp, nvcfg1); if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + u32 val; + + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val != TG3_EEPROM_MAGIC && + (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) + tg3_flag_set(tp, NO_NVRAM); + } } /* Chips other than 5700/5701 use the NVRAM for fetching info. */ static void tg3_nvram_init(struct tg3 *tp) { + if (tg3_flag(tp, IS_SSB_CORE)) { + /* No NVRAM and EEPROM on the SSB Broadcom GigE core. 
*/ + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, NO_NVRAM); + return; + } + tw32_f(GRC_EEPROM_ADDR, (EEPROM_ADDR_FSM_RESET | (EEPROM_DEFAULT_CLOCK_PERIOD << @@ -13818,8 +13977,8 @@ static void tg3_nvram_init(struct tg3 *tp) tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); udelay(100); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701) { tg3_flag_set(tp, NVRAM); if (tg3_nvram_lock(tp)) { @@ -13832,25 +13991,26 @@ static void tg3_nvram_init(struct tg3 *tp) tp->nvram_size = 0; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + if (tg3_asic_rev(tp) == ASIC_REV_5752) tg3_get_5752_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + else if (tg3_asic_rev(tp) == ASIC_REV_5755) tg3_get_5755_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + else if (tg3_asic_rev(tp) == ASIC_REV_5787 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5785) tg3_get_5787_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + else if (tg3_asic_rev(tp) == ASIC_REV_5761) tg3_get_5761_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + else if (tg3_asic_rev(tp) == ASIC_REV_5906) tg3_get_5906_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + else if (tg3_asic_rev(tp) == ASIC_REV_57780 || tg3_flag(tp, 57765_CLASS)) tg3_get_57780_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + else if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719) tg3_get_5717_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + else if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) tg3_get_5720_nvram_info(tp); else tg3_get_nvram_info(tp); @@ -13963,7 +14123,7 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) tg3_flag_set(tp, EEPROM_WRITE_PROT); tg3_flag_set(tp, WOL_CAP); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { tg3_flag_clear(tp, EEPROM_WRITE_PROT); tg3_flag_set(tp, IS_NIC); @@ -13990,13 +14150,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); ver >>= NIC_SRAM_DATA_VER_SHIFT; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701 && + tg3_asic_rev(tp) != ASIC_REV_5703 && (ver > 0) && (ver < 0x100)) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + if (tg3_asic_rev(tp) == ASIC_REV_5785) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == @@ -14044,18 +14204,16 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) /* Default to PHY_1_MODE if 0 (MAC_MODE) is * read on some older 5700/5701 bootcode. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5701) + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) tp->led_ctrl = LED_CTRL_MODE_PHY_1; break; case SHASTA_EXT_LED_SHARED: tp->led_ctrl = LED_CTRL_MODE_SHARED; - if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && - tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | LED_CTRL_MODE_PHY_2); break; @@ -14066,19 +14224,19 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) case SHASTA_EXT_LED_COMBO: tp->led_ctrl = LED_CTRL_MODE_COMBO; - if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | LED_CTRL_MODE_PHY_2); break; } - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && + if ((tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) && tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) tp->led_ctrl = LED_CTRL_MODE_PHY_2; - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) + if (tg3_chip_rev(tp) == CHIPREV_5784_AX) tp->led_ctrl = LED_CTRL_MODE_PHY_1; if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { @@ -14122,13 +14280,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; if ((tg3_flag(tp, 57765_PLUS) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && + (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != CHIPREV_5784_AX)) && (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; if (tg3_flag(tp, PCI_EXPRESS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + tg3_asic_rev(tp) != ASIC_REV_5785 && !tg3_flag(tp, 57765_PLUS)) { u32 cfg3; @@ -14152,6 +14310,39 @@ done: device_set_wakeup_capable(&tp->pdev->dev, false); } +static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) +{ + int i, err; + u32 val2, off = offset * 8; + + err = tg3_nvram_lock(tp); + if (err) + return err; + + tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); + tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | + APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); + tg3_ape_read32(tp, TG3_APE_OTP_CTRL); + udelay(10); + + for (i = 0; i < 100; i++) { + val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); + if (val2 & APE_OTP_STATUS_CMD_DONE) { + *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); + break; + } + udelay(10); + } + + tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); + + tg3_nvram_unlock(tp); + if (val2 & APE_OTP_STATUS_CMD_DONE) + return 0; + + return -EBUSY; +} + static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) { int i; @@ -14298,10 +14489,19 @@ static int tg3_phy_probe(struct tg3 *tp) * subsys device table. */ p = tg3_lookup_by_subsys(tp); - if (!p) + if (p) { + tp->phy_id = p->phy_id; + } else if (!tg3_flag(tp, IS_SSB_CORE)) { + /* For now we saw the IDs 0xbc050cd0, + * 0xbc050f80 and 0xbc050c30 on devices + * connected to an BCM4785 and there are + * probably more. Just assume that the phy is + * supported when it is connected to a SSB core + * for now. 
+ */ return -ENODEV; + } - tp->phy_id = p->phy_id; if (!tp->phy_id || tp->phy_id == TG3_PHY_ID_BCM8002) tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; @@ -14309,12 +14509,13 @@ static int tg3_phy_probe(struct tg3 *tp) } if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 || - (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && - tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) + (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762 || + (tg3_asic_rev(tp) == ASIC_REV_5717 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || + (tg3_asic_rev(tp) == ASIC_REV_57765 && + tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) tp->phy_flags |= TG3_PHYFLG_EEE_CAP; tg3_phy_init_link_config(tp); @@ -14424,7 +14625,7 @@ out_not_found: return; out_no_vpd: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tg3_asic_rev(tp) == ASIC_REV_5717) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) strcpy(tp->board_part_number, "BCM5717"); @@ -14432,7 +14633,7 @@ out_no_vpd: strcpy(tp->board_part_number, "BCM5718"); else goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) strcpy(tp->board_part_number, "BCM57780"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) @@ -14443,7 +14644,7 @@ out_no_vpd: strcpy(tp->board_part_number, "BCM57788"); else goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) strcpy(tp->board_part_number, "BCM57761"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) @@ -14458,7 +14659,7 @@ out_no_vpd: strcpy(tp->board_part_number, "BCM57795"); else goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) { + } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) strcpy(tp->board_part_number, "BCM57762"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) @@ -14469,7 +14670,7 @@ out_no_vpd: strcpy(tp->board_part_number, "BCM57786"); else goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { strcpy(tp->board_part_number, "BCM95906"); } else { nomatch: @@ -14691,6 +14892,8 @@ static void tg3_read_dash_ver(struct tg3 *tp) if (tg3_flag(tp, APE_HAS_NCSI)) fwtype = "NCSI"; + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) + fwtype = "SMASH"; else fwtype = "DASH"; @@ -14704,6 +14907,31 @@ static void tg3_read_dash_ver(struct tg3 *tp) (apedata & APE_FW_VERSION_BLDMSK)); } +static void tg3_read_otp_ver(struct tg3 *tp) +{ + u32 val, val2; + + if (tg3_asic_rev(tp) != ASIC_REV_5762) + return; + + if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && + !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && + TG3_OTP_MAGIC0_VALID(val)) { + u64 val64 = (u64) val << 32 | val2; + u32 ver = 0; + int i, vlen; + + for (i = 0; i < 7; i++) { + if ((val64 & 0xff) == 0) + break; + ver = val64 & 0xff; + val64 >>= 8; + } + vlen = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); + } +} + static void tg3_read_fw_ver(struct tg3 *tp) { u32 
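The tg3_read_otp_ver() hunk above derives the firmware-version suffix by walking the 64-bit OTP magic word from its least significant byte and keeping the last non-zero byte seen before a zero byte (at most seven bytes). A minimal stand-alone sketch of that byte scan follows; the OTP words in it are invented purely for illustration and are not values from the patch or from real hardware.

	#include <stdio.h>
	#include <stdint.h>

	/* Mirrors the byte scan in tg3_read_otp_ver(); the sample OTP
	 * words passed from main() are hypothetical. */
	static unsigned int otp_ver_from_magic(uint32_t magic_hi, uint32_t magic_lo)
	{
		uint64_t val64 = (uint64_t)magic_hi << 32 | magic_lo;
		unsigned int ver = 0;
		int i;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;              /* stop at the first zero byte */
			ver = val64 & 0xff;         /* remember the last non-zero byte */
			val64 >>= 8;
		}
		return ver;
	}

	int main(void)
	{
		/* hypothetical OTP pair: low bytes 0x05, 0x01, then a zero byte */
		printf(" .%02u\n", otp_ver_from_magic(0xa0000000, 0x00000105));  /* " .01" */
		return 0;
	}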
val; @@ -14714,6 +14942,7 @@ static void tg3_read_fw_ver(struct tg3 *tp) if (tg3_flag(tp, NO_NVRAM)) { strcat(tp->fw_ver, "sb"); + tg3_read_otp_ver(tp); return; } @@ -14788,7 +15017,7 @@ static struct pci_dev *tg3_find_peer(struct tg3 *tp) static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) { tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { + if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { u32 reg; /* All devices that use the alternate @@ -14800,7 +15029,10 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) reg = TG3PCI_GEN2_PRODID_ASICREV; else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || @@ -14822,46 +15054,47 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) /* Wrong chip ID in 5752 A0. This code can be removed later * as A0 is not in production. */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; - if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) tg3_flag_set(tp, 5717_PLUS); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + if (tg3_asic_rev(tp) == ASIC_REV_57765 || + tg3_asic_rev(tp) == ASIC_REV_57766) tg3_flag_set(tp, 57765_CLASS); - if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS)) + if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5762) tg3_flag_set(tp, 57765_PLUS); /* Intentionally exclude ASIC_REV_5906 */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5787 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5761 || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780 || tg3_flag(tp, 57765_PLUS)) tg3_flag_set(tp, 5755_PLUS); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) + if (tg3_asic_rev(tp) == ASIC_REV_5780 || + tg3_asic_rev(tp) == ASIC_REV_5714) tg3_flag_set(tp, 5780_CLASS); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || + if (tg3_asic_rev(tp) == ASIC_REV_5750 || + tg3_asic_rev(tp) == ASIC_REV_5752 || + 
tg3_asic_rev(tp) == ASIC_REV_5906 || tg3_flag(tp, 5755_PLUS) || tg3_flag(tp, 5780_CLASS)) tg3_flag_set(tp, 5750_PLUS); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + if (tg3_asic_rev(tp) == ASIC_REV_5705 || tg3_flag(tp, 5750_PLUS)) tg3_flag_set(tp, 5705_PLUS); } @@ -14871,13 +15104,13 @@ static bool tg3_10_100_only_device(struct tg3 *tp, { u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && - (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || + if ((tg3_asic_rev(tp) == ASIC_REV_5703 && + (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || (tp->phy_flags & TG3_PHYFLG_IS_FET)) return true; if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tg3_asic_rev(tp) == ASIC_REV_5705) { if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) return true; } else { @@ -14938,8 +15171,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) * enable this workaround if the 5703 is on the secondary * bus of these ICH bridges. */ - if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || - (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { + if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || + (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { static struct tg3_dev_id { u32 vendor; u32 device; @@ -14979,7 +15212,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) } } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tg3_asic_rev(tp) == ASIC_REV_5701) { static struct tg3_dev_id { u32 vendor; u32 device; @@ -15039,29 +15272,29 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) } while (bridge); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) + if (tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_asic_rev(tp) == ASIC_REV_5714) tp->pdev_peer = tg3_find_peer(tp); /* Determine TSO capabilities */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) ; /* Do nothing. HW bug. 
*/ else if (tg3_flag(tp, 57765_PLUS)) tg3_flag_set(tp, HW_TSO_3); else if (tg3_flag(tp, 5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_asic_rev(tp) == ASIC_REV_5906) tg3_flag_set(tp, HW_TSO_2); else if (tg3_flag(tp, 5750_PLUS)) { tg3_flag_set(tp, HW_TSO_1); tg3_flag_set(tp, TSO_BUG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && - tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) + if (tg3_asic_rev(tp) == ASIC_REV_5750 && + tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) tg3_flag_clear(tp, TSO_BUG); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { tg3_flag_set(tp, TSO_BUG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) + if (tg3_asic_rev(tp) == ASIC_REV_5705) tp->fw_needed = FIRMWARE_TG3TSO5; else tp->fw_needed = FIRMWARE_TG3TSO; @@ -15083,22 +15316,22 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tp->fw_needed = NULL; } - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) tp->fw_needed = FIRMWARE_TG3; tp->irq_max = 1; if (tg3_flag(tp, 5750_PLUS)) { tg3_flag_set(tp, SUPPORT_MSI); - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && - tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && + if (tg3_chip_rev(tp) == CHIPREV_5750_AX || + tg3_chip_rev(tp) == CHIPREV_5750_BX || + (tg3_asic_rev(tp) == ASIC_REV_5714 && + tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && tp->pdev_peer == tp->pdev)) tg3_flag_clear(tp, SUPPORT_MSI); if (tg3_flag(tp, 5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_asic_rev(tp) == ASIC_REV_5906) { tg3_flag_set(tp, 1SHOT_MSI); } @@ -15114,25 +15347,26 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tp->rxq_max = TG3_RSS_MAX_NUM_QS; tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) tp->txq_max = tp->irq_max - 1; } if (tg3_flag(tp, 5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_asic_rev(tp) == ASIC_REV_5906) tg3_flag_set(tp, SHORT_DMA_BUG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + if (tg3_asic_rev(tp) == ASIC_REV_5719) tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) tg3_flag_set(tp, LRG_PROD_RING_CAP); if (tg3_flag(tp, 57765_PLUS) && - tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) + tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) tg3_flag_set(tp, USE_JUMBO_BDFLAG); if (!tg3_flag(tp, 5705_PLUS) || @@ -15150,20 +15384,19 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5906) { + if (tg3_asic_rev(tp) == 
ASIC_REV_5906) { tg3_flag_clear(tp, HW_TSO_2); tg3_flag_clear(tp, TSO_CAPABLE); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) + if (tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5761 || + tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) tg3_flag_set(tp, CLKREQ_BUG); - } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { + } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { tg3_flag_set(tp, L1PLLPD_EN); } - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { /* BCM5785 devices are effectively PCIe devices, and should * follow PCIe codepaths, but do not have a PCIe capabilities * section. @@ -15196,7 +15429,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) &tp->pci_cacheline_sz); pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, &tp->pci_lat_timer); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + if (tg3_asic_rev(tp) == ASIC_REV_5703 && tp->pci_lat_timer < 64) { tp->pci_lat_timer = 64; pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, @@ -15206,7 +15439,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Important! -- It is critical that the PCI-X hw workaround * situation is decided before the first MMIO register access. */ - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { + if (tg3_chip_rev(tp) == CHIPREV_5700_BX) { /* 5700 BX chips need to have their TX producer index * mailboxes written twice to workaround a bug. */ @@ -15248,7 +15481,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tg3_flag_set(tp, PCI_32BIT); /* Chip-specific fixup from Broadcom driver */ - if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && + if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { pci_state_reg |= PCISTATE_RETRY_SAME_DMA; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); @@ -15265,9 +15498,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Various workaround register access methods */ if (tg3_flag(tp, PCIX_TARGET_HWBUG)) tp->write32 = tg3_write_indirect_reg32; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || + else if (tg3_asic_rev(tp) == ASIC_REV_5701 || (tg3_flag(tp, PCI_EXPRESS) && - tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { + tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { /* * Back to back register writes can cause problems on these * chips, the workaround is to read back all reg writes @@ -15299,7 +15532,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) pci_cmd &= ~PCI_COMMAND_MEMORY; pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { tp->read32_mbox = tg3_read32_mbox_5906; tp->write32_mbox = tg3_write32_mbox_5906; tp->write32_tx_mbox = tg3_write32_mbox_5906; @@ -15308,8 +15541,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) if (tp->write32 == tg3_write_indirect_reg32 || (tg3_flag(tp, PCIX_MODE) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) + (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701))) 
tg3_flag_set(tp, SRAM_USE_CONFIG); /* The memory arbiter has to be enabled in order for SRAM accesses @@ -15321,7 +15554,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + if (tg3_asic_rev(tp) == ASIC_REV_5704 || tg3_flag(tp, 5780_CLASS)) { if (tg3_flag(tp, PCIX_MODE)) { pci_read_config_dword(tp->pdev, @@ -15329,21 +15562,23 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) &val); tp->pci_fn = val & 0x7; } - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); - if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == - NIC_SRAM_CPMUSTAT_SIG) { - tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717; - tp->pci_fn = tp->pci_fn ? 1 : 0; - } - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); - if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == - NIC_SRAM_CPMUSTAT_SIG) { + if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) + val = tr32(TG3_CPMU_STATUS); + + if (tg3_asic_rev(tp) == ASIC_REV_5717) + tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; + else tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> TG3_CPMU_STATUS_FSHFT_5719; - } + } + + if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { + tp->write32_tx_mbox = tg3_write_flush_reg32; + tp->write32_rx_mbox = tg3_write_flush_reg32; } /* Get eeprom hw config before calling tg3_set_power_state(). @@ -15381,18 +15616,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) * It is also used as eeprom write protect on LOMs. */ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + if (tg3_asic_rev(tp) == ASIC_REV_5700 || tg3_flag(tp, EEPROM_WRITE_PROT)) tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1); /* Unused GPIO3 must be driven as output on 5752 because there * are no pull-up resistors on unused GPIO pins. */ - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + else if (tg3_asic_rev(tp) == ASIC_REV_5752) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_57780 || tg3_flag(tp, 57765_CLASS)) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; @@ -15406,6 +15641,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) GRC_LCLCTRL_GPIO_OUTPUT0; } + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tp->grc_local_ctrl |= + tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; + /* Switch out of Vaux if it is a NIC */ tg3_pwrsrc_switch_to_vmain(tp); @@ -15416,42 +15655,42 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tg3_flag_set(tp, JUMBO_RING_ENABLE); /* Determine WakeOnLan speed to use. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { tg3_flag_clear(tp, WOL_SPEED_100MB); } else { tg3_flag_set(tp, WOL_SPEED_100MB); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + if (tg3_asic_rev(tp) == ASIC_REV_5906) tp->phy_flags |= TG3_PHYFLG_IS_FET; /* A few boards don't want Ethernet@WireSpeed phy feature */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && - (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + (tg3_asic_rev(tp) == ASIC_REV_5705 && + (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && + (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || (tp->phy_flags & TG3_PHYFLG_IS_FET) || (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) + if (tg3_chip_rev(tp) == CHIPREV_5703_AX || + tg3_chip_rev(tp) == CHIPREV_5704_AX) tp->phy_flags |= TG3_PHYFLG_ADC_BUG; - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; if (tg3_flag(tp, 5705_PLUS) && !(tp->phy_flags & TG3_PHYFLG_IS_FET) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && + tg3_asic_rev(tp) != ASIC_REV_5785 && + tg3_asic_rev(tp) != ASIC_REV_57780 && !tg3_flag(tp, 57765_PLUS)) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5787 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5761) { if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; @@ -15461,8 +15700,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tp->phy_flags |= TG3_PHYFLG_BER_BUG; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + if (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != CHIPREV_5784_AX) { tp->phy_otp = tg3_read_otp_phycfg(tp); if (tp->phy_otp == 0) tp->phy_otp = TG3_OTP_DEFAULT; @@ -15474,20 +15713,20 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tp->mi_mode = MAC_MI_MODE_BASE; tp->coalesce_mode = 0; - if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) + if (tg3_chip_rev(tp) != CHIPREV_5700_AX && + tg3_chip_rev(tp) != CHIPREV_5700_BX) tp->coalesce_mode |= HOSTCC_MODE_32BYTE; /* Set these bits to enable statistics workaround. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { tp->coalesce_mode |= HOSTCC_MODE_ATTN; tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + if (tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) tg3_flag_set(tp, USE_PHYLIB); err = tg3_mdio_init(tp); @@ -15496,7 +15735,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Initialize data/descriptor byte/word swapping. */ val = tr32(GRC_MODE); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | GRC_MODE_WORD_SWAP_B2HRX_DATA | GRC_MODE_B2HRX_ENABLE | @@ -15516,12 +15756,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && !tg3_flag(tp, PCIX_TARGET_HWBUG)) { - u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); - - if (chiprevid == CHIPREV_ID_5701_A0 || - chiprevid == CHIPREV_ID_5701_B0 || - chiprevid == CHIPREV_ID_5701_B2 || - chiprevid == CHIPREV_ID_5701_B5) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { void __iomem *sram_base; /* Write some dummy words into the SRAM status block @@ -15544,13 +15782,13 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) grc_misc_cfg = tr32(GRC_MISC_CFG); grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + if (tg3_asic_rev(tp) == ASIC_REV_5705 && (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) tg3_flag_set(tp, IS_5788); if (!tg3_flag(tp, IS_5788) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_asic_rev(tp) != ASIC_REV_5700) tg3_flag_set(tp, TAGGED_STATUS); if (tg3_flag(tp, TAGGED_STATUS)) { tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | @@ -15583,7 +15821,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; } else { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + if (tg3_asic_rev(tp) == ASIC_REV_5700) tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; else tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; @@ -15593,7 +15831,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) * change bit implementation, so we must use the * status register in those cases. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + if (tg3_asic_rev(tp) == ASIC_REV_5700) tg3_flag_set(tp, USE_LINKCHG_REG); else tg3_flag_clear(tp, USE_LINKCHG_REG); @@ -15603,7 +15841,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) * upon subsystem IDs. 
*/ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + tg3_asic_rev(tp) == ASIC_REV_5701 && !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; tg3_flag_set(tp, USE_LINKCHG_REG); @@ -15617,7 +15855,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + if (tg3_asic_rev(tp) == ASIC_REV_5701 && tg3_flag(tp, PCIX_MODE)) { tp->rx_offset = NET_SKB_PAD; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS @@ -15634,9 +15872,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Increment the rx prod index on the rx std ring by at most * 8 for these chips to workaround hw errata. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + if (tg3_asic_rev(tp) == ASIC_REV_5750 || + tg3_asic_rev(tp) == ASIC_REV_5752 || + tg3_asic_rev(tp) == ASIC_REV_5755) tp->rx_std_max_post = 8; if (tg3_flag(tp, ASPM_WORKAROUND)) @@ -15658,7 +15896,6 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp) addr = of_get_property(dp, "local-mac-address", &len); if (addr && len == 6) { memcpy(dev->dev_addr, addr, 6); - memcpy(dev->perm_addr, dev->dev_addr, 6); return 0; } return -ENODEV; @@ -15669,7 +15906,6 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp) struct net_device *dev = tp->dev; memcpy(dev->dev_addr, idprom->id_ethaddr, 6); - memcpy(dev->perm_addr, idprom->id_ethaddr, 6); return 0; } #endif @@ -15679,14 +15915,21 @@ static int tg3_get_device_address(struct tg3 *tp) struct net_device *dev = tp->dev; u32 hi, lo, mac_offset; int addr_ok = 0; + int err; #ifdef CONFIG_SPARC if (!tg3_get_macaddr_sparc(tp)) return 0; #endif + if (tg3_flag(tp, IS_SSB_CORE)) { + err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); + if (!err && is_valid_ether_addr(&dev->dev_addr[0])) + return 0; + } + mac_offset = 0x7c; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + if (tg3_asic_rev(tp) == ASIC_REV_5704 || tg3_flag(tp, 5780_CLASS)) { if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) mac_offset = 0xcc; @@ -15699,7 +15942,7 @@ static int tg3_get_device_address(struct tg3 *tp) mac_offset = 0xcc; if (tp->pci_fn > 1) mac_offset += 0x18c; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + } else if (tg3_asic_rev(tp) == ASIC_REV_5906) mac_offset = 0x10; /* First try to get it from MAC address mailbox. */ @@ -15746,7 +15989,6 @@ static int tg3_get_device_address(struct tg3 *tp) #endif return -EINVAL; } - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); return 0; } @@ -15768,8 +16010,8 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) /* On 5703 and later chips, the boundary bits have no * effect. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701 && !tg3_flag(tp, PCI_EXPRESS)) goto out; @@ -16007,14 +16249,14 @@ static int tg3_test_dma(struct tg3 *tp) /* DMA read watermark not used on PCIE */ tp->dma_rwctrl |= 0x00180000; } else if (!tg3_flag(tp, PCIX_MODE)) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) + if (tg3_asic_rev(tp) == ASIC_REV_5705 || + tg3_asic_rev(tp) == ASIC_REV_5750) tp->dma_rwctrl |= 0x003f0000; else tp->dma_rwctrl |= 0x003f000f; } else { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704) { u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); u32 read_water = 0x7; @@ -16023,35 +16265,37 @@ static int tg3_test_dma(struct tg3 *tp) * better performance. */ if (tg3_flag(tp, 40BIT_DMA_BUG) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tg3_asic_rev(tp) == ASIC_REV_5704) tp->dma_rwctrl |= 0x8000; else if (ccval == 0x6 || ccval == 0x7) tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) + if (tg3_asic_rev(tp) == ASIC_REV_5703) read_water = 4; /* Set bit 23 to enable PCIX hw bug fix */ tp->dma_rwctrl |= (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | (1 << 23); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { + } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { /* 5780 always in PCIX mode */ tp->dma_rwctrl |= 0x00144000; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { /* 5714 always in PCIX mode */ tp->dma_rwctrl |= 0x00148000; } else { tp->dma_rwctrl |= 0x001b000f; } } + if (tg3_flag(tp, ONE_DMA_AT_ONCE)) + tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704) tp->dma_rwctrl &= 0xfffffff0; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { /* Remove this if it causes problems for some boards. 
*/ tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; @@ -16075,8 +16319,8 @@ static int tg3_test_dma(struct tg3 *tp) tg3_switch_clocks(tp); #endif - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701) goto out; /* It is best to perform DMA test with maximum write burst size @@ -16195,7 +16439,7 @@ static void tg3_init_bufmgr_config(struct tg3 *tp) DEFAULT_MB_MACRX_LOW_WATER_5705; tp->bufmgr_config.mbuf_high_water = DEFAULT_MB_HIGH_WATER_5705; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { tp->bufmgr_config.mbuf_mac_rx_low_water = DEFAULT_MB_MACRX_LOW_WATER_5906; tp->bufmgr_config.mbuf_high_water = @@ -16253,6 +16497,7 @@ static char *tg3_phy_string(struct tg3 *tp) case TG3_PHY_ID_BCM57765: return "57765"; case TG3_PHY_ID_BCM5719C: return "5719C"; case TG3_PHY_ID_BCM5720C: return "5720C"; + case TG3_PHY_ID_BCM5762: return "5762C"; case TG3_PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -16389,6 +16634,18 @@ static int tg3_init_one(struct pci_dev *pdev, else tp->msg_enable = TG3_DEF_MSG_ENABLE; + if (pdev_is_ssb_gige_core(pdev)) { + tg3_flag_set(tp, IS_SSB_CORE); + if (ssb_gige_must_flush_posted_writes(pdev)) + tg3_flag_set(tp, FLUSH_POSTED_WRITES); + if (ssb_gige_one_dma_at_once(pdev)) + tg3_flag_set(tp, ONE_DMA_AT_ONCE); + if (ssb_gige_have_roboswitch(pdev)) + tg3_flag_set(tp, ROBOSWITCH); + if (ssb_gige_is_rgmii(pdev)) + tg3_flag_set(tp, RGMII_MODE); + } + /* The word/byte swap controls here control register access byte * swapping. DMA data byte swapping is controlled in the GRC_MODE * setting below. @@ -16429,7 +16686,10 @@ static int tg3_init_one(struct pci_dev *pdev, tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) { tg3_flag_set(tp, ENABLE_APE); tp->aperegs = pci_ioremap_bar(pdev, BAR_2); if (!tp->aperegs) { @@ -16501,7 +16761,7 @@ static int tg3_init_one(struct pci_dev *pdev, /* 5700 B0 chips do not support checksumming correctly due * to hardware bugs. */ - if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) { + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) { features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; if (tg3_flag(tp, 5755_PLUS)) @@ -16521,11 +16781,11 @@ static int tg3_init_one(struct pci_dev *pdev, if (features & NETIF_F_IPV6_CSUM) features |= NETIF_F_TSO6; if (tg3_flag(tp, HW_TSO_3) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + tg3_asic_rev(tp) == ASIC_REV_5761 || + (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != CHIPREV_5784_AX) || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) features |= NETIF_F_TSO_ECN; } @@ -16537,14 +16797,14 @@ static int tg3_init_one(struct pci_dev *pdev, * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY * loopback for the remaining devices. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + if (tg3_asic_rev(tp) != ASIC_REV_5780 && !tg3_flag(tp, CPMU_PRESENT)) /* Add the loopback capability */ features |= NETIF_F_LOOPBACK; dev->hw_features |= features; - if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 && !tg3_flag(tp, TSO_CAPABLE) && !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { tg3_flag_set(tp, MAX_RXPEND_64); @@ -16623,8 +16883,9 @@ static int tg3_init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) tg3_flag_set(tp, PTP_CAPABLE); if (tg3_flag(tp, 5717_PLUS)) { @@ -16634,6 +16895,8 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_timer_init(tp); + tg3_carrier_off(tp); + err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting\n"); @@ -16642,7 +16905,7 @@ static int tg3_init_one(struct pci_dev *pdev, netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", tp->board_part_number, - tp->pci_chip_rev_id, + tg3_chip_rev_id(tp), tg3_bus_string(tp, str), dev->dev_addr); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index d330e81f5793..8d7d4c2ab5d6 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2012 Broadcom Corporation. + * Copyright (C) 2007-2013 Broadcom Corporation. 
*/ #ifndef _T3_H @@ -65,6 +65,9 @@ #define TG3PCI_DEVICE_TIGON3_57766 0x1686 #define TG3PCI_DEVICE_TIGON3_57786 0x16b3 #define TG3PCI_DEVICE_TIGON3_57782 0x16b7 +#define TG3PCI_DEVICE_TIGON3_5762 0x1687 +#define TG3PCI_DEVICE_TIGON3_5725 0x1643 +#define TG3PCI_DEVICE_TIGON3_5727 0x16f3 /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 @@ -117,9 +120,7 @@ #define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200 #define MISC_HOST_CTRL_CHIPREV 0xffff0000 #define MISC_HOST_CTRL_CHIPREV_SHIFT 16 -#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \ - (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \ - MISC_HOST_CTRL_CHIPREV_SHIFT) + #define CHIPREV_ID_5700_A0 0x7000 #define CHIPREV_ID_5700_A1 0x7001 #define CHIPREV_ID_5700_B0 0x7100 @@ -159,7 +160,8 @@ #define CHIPREV_ID_57765_A0 0x57785000 #define CHIPREV_ID_5719_A0 0x05719000 #define CHIPREV_ID_5720_A0 0x05720000 -#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) +#define CHIPREV_ID_5762_A0 0x05762000 + #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 #define ASIC_REV_5703 0x01 @@ -182,7 +184,7 @@ #define ASIC_REV_5719 0x5719 #define ASIC_REV_5720 0x5720 #define ASIC_REV_57766 0x57766 -#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) +#define ASIC_REV_5762 0x5762 #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 #define CHIPREV_5700_CX 0x72 @@ -195,7 +197,6 @@ #define CHIPREV_5784_AX 0x57840 #define CHIPREV_5761_AX 0x57610 #define CHIPREV_57765_AX 0x577650 -#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff) #define METAL_REV_A0 0x00 #define METAL_REV_A1 0x01 #define METAL_REV_B0 0x00 @@ -774,7 +775,7 @@ #define SG_DIG_AUTONEG_ERROR 0x00000001 #define TG3_TX_TSTAMP_LSB 0x000005c0 #define TG3_TX_TSTAMP_MSB 0x000005c4 -#define TG3_TSTAMP_MASK 0x7fffffffffffffff +#define TG3_TSTAMP_MASK 0x7fffffffffffffffLL /* 0x5c8 --> 0x600 unused */ #define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */ #define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */ @@ -1159,6 +1160,8 @@ #define CPMU_MUTEX_GNT_DRIVER 0x00001000 #define TG3_CPMU_PHY_STRAP 0x00003664 #define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 +#define TG3_CPMU_PADRNG_CTL 0x00003668 +#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000 /* 0x3664 --> 0x36b0 unused */ #define TG3_CPMU_EEE_MODE 0x000036b0 @@ -1178,6 +1181,7 @@ #define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc #define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 #define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 +#define TG3_CPMU_EEE_LNKIDL_APE_TX_MT 0x00000002 /* 0x36c0 --> 0x36d0 unused */ #define TG3_CPMU_EEE_CTRL 0x000036d0 @@ -1400,7 +1404,10 @@ #define RDMAC_STATUS_FIFOURUN 0x00000080 #define RDMAC_STATUS_FIFOOREAD 0x00000100 #define RDMAC_STATUS_LNGREAD 0x00000200 -/* 0x4808 --> 0x4900 unused */ +/* 0x4808 --> 0x4890 unused */ + +#define TG3_RDMA_RSRVCTRL_REG2 0x00004890 +#define TG3_LSO_RD_DMA_CRPTEN_CTRL2 0x000048a0 #define TG3_RDMA_RSRVCTRL_REG 0x00004900 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 @@ -1850,6 +1857,7 @@ #define FLASH_VENDOR_SST_SMALL 0x00000001 #define FLASH_VENDOR_SST_LARGE 0x02000001 #define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003 +#define NVRAM_CFG1_5762VENDOR_MASK 0x03e00003 #define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000 #define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000 #define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003 @@ -1910,6 +1918,8 @@ #define FLASH_5717VENDOR_ST_45USPT 0x03400001 #define FLASH_5720_EEPROM_HD 0x00000001 #define FLASH_5720_EEPROM_LD 0x00000003 +#define FLASH_5762_EEPROM_HD 
0x02000001 +#define FLASH_5762_EEPROM_LD 0x02000003 #define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000 #define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002 #define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001 @@ -2365,6 +2375,20 @@ #define APE_LOCK_REQ_DRIVER 0x00001000 #define TG3_APE_LOCK_GRANT 0x004c #define APE_LOCK_GRANT_DRIVER 0x00001000 +#define TG3_APE_OTP_CTRL 0x00e8 +#define APE_OTP_CTRL_PROG_EN 0x200000 +#define APE_OTP_CTRL_CMD_RD 0x000000 +#define APE_OTP_CTRL_START 0x000001 +#define TG3_APE_OTP_STATUS 0x00ec +#define APE_OTP_STATUS_CMD_DONE 0x000001 +#define TG3_APE_OTP_ADDR 0x00f0 +#define APE_OTP_ADDR_CPU_ENABLE 0x80000000 +#define TG3_APE_OTP_RD_DATA 0x00f8 + +#define OTP_ADDRESS_MAGIC0 0x00000050 +#define TG3_OTP_MAGIC0_VALID(val) \ + ((((val) & 0xf0000000) == 0xa0000000) ||\ + (((val) & 0x0f000000) == 0x0a000000)) /* APE shared memory. Accessible through BAR1 */ #define TG3_APE_SHMEM_BASE 0x4000 @@ -3030,6 +3054,11 @@ enum TG3_FLAGS { TG3_FLAG_57765_PLUS, TG3_FLAG_57765_CLASS, TG3_FLAG_5717_PLUS, + TG3_FLAG_IS_SSB_CORE, + TG3_FLAG_FLUSH_POSTED_WRITES, + TG3_FLAG_ROBOSWITCH, + TG3_FLAG_ONE_DMA_AT_ONCE, + TG3_FLAG_RGMII_MODE, /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ @@ -3206,6 +3235,7 @@ struct tg3 { #define TG3_PHY_ID_BCM57765 0x5c0d8a40 #define TG3_PHY_ID_BCM5719C 0x5c0d8a20 #define TG3_PHY_ID_BCM5720C 0x5c0d8b60 +#define TG3_PHY_ID_BCM5762 0x85803780 #define TG3_PHY_ID_BCM5906 0xdc00ac40 #define TG3_PHY_ID_BCM8002 0x60010140 #define TG3_PHY_ID_INVALID 0xffffffff @@ -3230,6 +3260,7 @@ struct tg3 { (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \ + (X) == TG3_PHY_ID_BCM5720C || (X) == TG3_PHY_ID_BCM5762 || \ (X) == TG3_PHY_ID_BCM8002) u32 phy_flags; @@ -3320,10 +3351,22 @@ struct tg3 { const struct firmware *fw; u32 fw_len; /* includes BSS */ -#if IS_ENABLED(CONFIG_HWMON) struct device *hwmon_dev; -#endif bool link_up; }; +/* Accessor macros for chip and asic attributes + * + * nb: Using static inlines equivalent to the accessor macros generates + * larger object code with gcc 4.7. + * Using statement expression macros to check tp with + * typecheck(struct tg3 *, tp) also creates larger objects. 
+ */ +#define tg3_chip_rev_id(tp) \ + ((tp)->pci_chip_rev_id) +#define tg3_asic_rev(tp) \ + ((tp)->pci_chip_rev_id >> 12) +#define tg3_chip_rev(tp) \ + ((tp)->pci_chip_rev_id >> 8) + #endif /* !(_T3_H) */ diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index b9d4bb9530e5..79039439bfdc 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -287,7 +287,7 @@ static int macb_mii_probe(struct net_device *dev) } /* attach the mac to the phy */ - ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0, + ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, bp->phy_interface); if (ret) { netdev_err(dev, "Could not attach to PHY\n"); diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index f7f02900f650..a170065b5973 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1463,7 +1463,6 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index c8fdeaae56c0..20d2085f61c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -131,7 +131,7 @@ static void t1_set_rxmode(struct net_device *dev) static void link_report(struct port_info *p) { if (!netif_carrier_ok(p->dev)) - printk(KERN_INFO "%s: link down\n", p->dev->name); + netdev_info(p->dev, "link down\n"); else { const char *s = "10Mbps"; @@ -141,9 +141,9 @@ static void link_report(struct port_info *p) case SPEED_100: s = "100Mbps"; break; } - printk(KERN_INFO "%s: link up, %s, %s-duplex\n", - p->dev->name, s, - p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); + netdev_info(p->dev, "link up, %s, %s-duplex\n", + s, p->link_config.duplex == DUPLEX_FULL + ? "full" : "half"); } } @@ -976,19 +976,13 @@ static const struct net_device_ops cxgb_netdev_ops = { static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int version_printed; - int i, err, pci_using_dac = 0; unsigned long mmio_start, mmio_len; const struct board_info *bi; struct adapter *adapter = NULL; struct port_info *pi; - if (!version_printed) { - printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION, - DRV_VERSION); - ++version_printed; - } + pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION); err = pci_enable_device(pdev); if (err) @@ -1124,8 +1118,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < bi->port_number; ++i) { err = register_netdev(adapter->port[i].dev); if (err) - pr_warning("%s: cannot register net device %s, skipping\n", - pci_name(pdev), adapter->port[i].dev->name); + pr_warn("%s: cannot register net device %s, skipping\n", + pci_name(pdev), adapter->port[i].dev->name); else { /* * Change the name we use for messages to the name of @@ -1143,10 +1137,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_release_adapter_res; } - printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name, - bi->desc, adapter->params.chip_revision, - adapter->params.pci.is_pcix ? 
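The accessor macros introduced just above give three views of the same tp->pci_chip_rev_id word: tg3_chip_rev_id() returns it unchanged, tg3_chip_rev() drops the metal revision (the low 8 bits), and tg3_asic_rev() drops the whole revision (the low 12 bits). A stand-alone sketch of the decode, checked against chip-rev IDs defined elsewhere in this patch; the struct here is a trimmed stand-in, not the driver's struct tg3.

	#include <assert.h>
	#include <stdio.h>

	/* Trimmed stand-in for struct tg3: only the field the accessors read. */
	struct tg3 {
		unsigned int pci_chip_rev_id;
	};

	/* Same definitions as the tg3.h hunk above. */
	#define tg3_chip_rev_id(tp)	((tp)->pci_chip_rev_id)
	#define tg3_asic_rev(tp)	((tp)->pci_chip_rev_id >> 12)
	#define tg3_chip_rev(tp)	((tp)->pci_chip_rev_id >> 8)

	int main(void)
	{
		struct tg3 id_5700 = { .pci_chip_rev_id = 0x7000 };     /* CHIPREV_ID_5700_A0 */
		struct tg3 id_5762 = { .pci_chip_rev_id = 0x05762000 }; /* CHIPREV_ID_5762_A0 */

		assert(tg3_asic_rev(&id_5700) == 0x07);    /* ASIC_REV_5700 */
		assert(tg3_chip_rev(&id_5700) == 0x70);    /* CHIPREV_5700_AX */
		assert(tg3_asic_rev(&id_5762) == 0x5762);  /* ASIC_REV_5762 */

		printf("5762: asic %#x chip rev %#x chip rev id %#x\n",
		       tg3_asic_rev(&id_5762), tg3_chip_rev(&id_5762),
		       tg3_chip_rev_id(&id_5762));
		return 0;
	}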
"PCIX" : "PCI", - adapter->params.pci.speed, adapter->params.pci.width); + pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n", + adapter->name, bi->desc, adapter->params.chip_revision, + adapter->params.pci.is_pcix ? "PCIX" : "PCI", + adapter->params.pci.speed, adapter->params.pci.width); /* * Set the T1B ASIC and memory clocks. diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index d84872e88171..482976925154 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1822,8 +1822,8 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) */ if (unlikely(skb->len < ETH_HLEN || skb->len > dev->mtu + eth_hdr_len(skb->data))) { - pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name, - skb->len, eth_hdr_len(skb->data), dev->mtu); + netdev_dbg(dev, "packet size %d hdr %d mtu%d\n", + skb->len, eth_hdr_len(skb->data), dev->mtu); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1831,7 +1831,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->ip_summed == CHECKSUM_PARTIAL && ip_hdr(skb)->protocol == IPPROTO_UDP) { if (unlikely(skb_checksum_help(skb))) { - pr_debug("%s: unable to do udp checksum\n", dev->name); + netdev_dbg(dev, "unable to do udp checksum\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index f15ee326d5c1..2b5e62193cea 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -29,6 +29,9 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> @@ -153,7 +156,7 @@ struct workqueue_struct *cxgb3_wq; static void link_report(struct net_device *dev) { if (!netif_carrier_ok(dev)) - printk(KERN_INFO "%s: link down\n", dev->name); + netdev_info(dev, "link down\n"); else { const char *s = "10Mbps"; const struct port_info *p = netdev_priv(dev); @@ -170,8 +173,9 @@ static void link_report(struct net_device *dev) break; } - printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s, - p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); + netdev_info(dev, "link up, %s, %s-duplex\n", + s, p->link_config.duplex == DUPLEX_FULL + ? 
"full" : "half"); } } @@ -318,10 +322,10 @@ void t3_os_phymod_changed(struct adapter *adap, int port_id) const struct port_info *pi = netdev_priv(dev); if (pi->phy.modtype == phy_modtype_none) - printk(KERN_INFO "%s: PHY module unplugged\n", dev->name); + netdev_info(dev, "PHY module unplugged\n"); else - printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name, - mod_str[pi->phy.modtype]); + netdev_info(dev, "%s PHY module inserted\n", + mod_str[pi->phy.modtype]); } static void cxgb_set_rxmode(struct net_device *dev) @@ -1422,8 +1426,7 @@ static int cxgb_open(struct net_device *dev) if (is_offload(adapter) && !ofld_disable) { err = offload_open(dev); if (err) - printk(KERN_WARNING - "Could not initialize offload capabilities\n"); + pr_warn("Could not initialize offload capabilities\n"); } netif_set_real_num_tx_queues(dev, pi->nqsets); @@ -3132,14 +3135,13 @@ static void print_port_info(struct adapter *adap, const struct adapter_info *ai) if (!test_bit(i, &adap->registered_device_map)) continue; - printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n", - dev->name, ai->desc, pi->phy.desc, - is_offload(adap) ? "R" : "", adap->params.rev, buf, - (adap->flags & USING_MSIX) ? " MSI-X" : - (adap->flags & USING_MSI) ? " MSI" : ""); + netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n", + ai->desc, pi->phy.desc, + is_offload(adap) ? "R" : "", adap->params.rev, buf, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? " MSI" : ""); if (adap->name == dev->name && adap->params.vpd.mclk) - printk(KERN_INFO - "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", + pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", adap->name, t3_mc7_size(&adap->cm) >> 20, t3_mc7_size(&adap->pmtx) >> 20, t3_mc7_size(&adap->pmrx) >> 20, @@ -3177,24 +3179,18 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev) NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int version_printed; - int i, err, pci_using_dac = 0; resource_size_t mmio_start, mmio_len; const struct adapter_info *ai; struct adapter *adapter = NULL; struct port_info *pi; - if (!version_printed) { - printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); - ++version_printed; - } + pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION); if (!cxgb3_wq) { cxgb3_wq = create_singlethread_workqueue(DRV_NAME); if (!cxgb3_wq) { - printk(KERN_ERR DRV_NAME - ": cannot initialize work queue\n"); + pr_err("cannot initialize work queue\n"); return -ENOMEM; } } diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 942dace361d2..4232767862b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -30,6 +30,8 @@ * SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/list.h> #include <linux/slab.h> #include <net/neighbour.h> @@ -62,9 +64,8 @@ static const unsigned int MAX_ATIDS = 64 * 1024; static const unsigned int ATID_BASE = 0x10000; static void cxgb_neigh_update(struct neighbour *neigh); -static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh, - struct dst_entry *new, struct neighbour *new_neigh, - const void *daddr); +static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new, + struct neighbour *neigh, const void *daddr); static inline int offload_activated(struct t3cdev *tdev) { @@ -182,14 +183,17 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter, struct net_device *dev = adapter->port[i]; if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { + rcu_read_lock(); if (vlan && vlan != VLAN_VID_MASK) { - rcu_read_lock(); dev = __vlan_find_dev_deep(dev, vlan); - rcu_read_unlock(); } else if (netif_is_bond_slave(dev)) { - while (dev->master) - dev = dev->master; + struct net_device *upper_dev; + + while ((upper_dev = + netdev_master_upper_dev_get_rcu(dev))) + dev = upper_dev; } + rcu_read_unlock(); return dev; } } @@ -232,8 +236,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, if ((val >> S_MAXRXDATA) != 0x3f60) { val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE); val |= V_MAXRXDATA(0x3f60); - printk(KERN_INFO - "%s, iscsi set MaxRxData to 16224 (0x%x).\n", + pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n", adapter->name, val); t3_write_reg(adapter, A_TP_PARA_REG2, val); } @@ -253,8 +256,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, for (i = 0; i < 4; i++) val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) { - printk(KERN_INFO - "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n", + pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n", adapter->name, val, uiip->pgsz_factor[0], uiip->pgsz_factor[1], uiip->pgsz_factor[2], uiip->pgsz_factor[3]); @@ -706,8 +708,7 @@ static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb) struct cpl_smt_write_rpl *rpl = cplhdr(skb); if (rpl->status != CPL_ERR_NONE) - printk(KERN_ERR - "Unexpected SMT_WRITE_RPL status %u for entry %u\n", + pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n", rpl->status, GET_TID(rpl)); return CPL_RET_BUF_DONE; @@ -718,8 +719,7 @@ static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb) struct cpl_l2t_write_rpl *rpl = cplhdr(skb); if (rpl->status != CPL_ERR_NONE) - printk(KERN_ERR - "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n", rpl->status, GET_TID(rpl)); return CPL_RET_BUF_DONE; @@ -730,8 +730,7 @@ static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb) struct cpl_rte_write_rpl *rpl = cplhdr(skb); if (rpl->status != CPL_ERR_NONE) - printk(KERN_ERR - "Unexpected RTE_WRITE_RPL status %u for entry %u\n", + pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n", rpl->status, GET_TID(rpl)); return CPL_RET_BUF_DONE; @@ -751,7 +750,7 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb) t3c_tid-> ctx); } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + pr_err("%s: received clientless CPL command 0x%x\n", dev->name, CPL_ACT_OPEN_RPL); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } @@ -769,7 +768,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb) return t3c_tid->client->handlers[p->opcode] (dev, skb, 
t3c_tid->ctx); } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + pr_err("%s: received clientless CPL command 0x%x\n", dev->name, p->opcode); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } @@ -787,7 +786,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb) return t3c_tid->client->handlers[p->opcode] (dev, skb, t3c_tid->ctx); } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + pr_err("%s: received clientless CPL command 0x%x\n", dev->name, p->opcode); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } @@ -814,7 +813,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb) return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] (dev, skb, t3c_tid->ctx); } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + pr_err("%s: received clientless CPL command 0x%x\n", dev->name, CPL_PASS_ACCEPT_REQ); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } @@ -908,7 +907,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] (dev, skb, t3c_tid->ctx); } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + pr_err("%s: received clientless CPL command 0x%x\n", dev->name, CPL_ACT_ESTABLISH); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } @@ -954,7 +953,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb) return t3c_tid->client->handlers[opcode] (dev, skb, t3c_tid->ctx); } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + pr_err("%s: received clientless CPL command 0x%x\n", dev->name, opcode); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } @@ -970,10 +969,9 @@ static int nb_callback(struct notifier_block *self, unsigned long event, } case (NETEVENT_REDIRECT):{ struct netevent_redirect *nr = ctx; - cxgb_redirect(nr->old, nr->old_neigh, - nr->new, nr->new_neigh, + cxgb_redirect(nr->old, nr->new, nr->neigh, nr->daddr); - cxgb_neigh_update(nr->new_neigh); + cxgb_neigh_update(nr->neigh); break; } default: @@ -991,8 +989,7 @@ static struct notifier_block nb = { */ static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb) { - printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name, - *skb->data); + pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } @@ -1010,8 +1007,8 @@ void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h) if (opcode < NUM_CPL_CMDS) cpl_handlers[opcode] = h ? 
h : do_bad_cpl; else - printk(KERN_ERR "T3C: handler registration for " - "opcode %x failed\n", opcode); + pr_err("T3C: handler registration for opcode %x failed\n", + opcode); } EXPORT_SYMBOL(t3_register_cpl_handler); @@ -1030,9 +1027,8 @@ static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) if (ret & CPL_RET_UNKNOWN_TID) { union opcode_tid *p = cplhdr(skb); - printk(KERN_ERR "%s: CPL message (opcode %u) had " - "unknown TID %u\n", dev->name, opcode, - G_TID(ntohl(p->opcode_tid))); + pr_err("%s: CPL message (opcode %u) had unknown TID %u\n", + dev->name, opcode, G_TID(ntohl(p->opcode_tid))); } #endif if (ret & CPL_RET_BUF_DONE) @@ -1096,7 +1092,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) skb = alloc_skb(sizeof(*req), GFP_ATOMIC); if (!skb) { - printk(KERN_ERR "%s: cannot allocate skb!\n", __func__); + pr_err("%s: cannot allocate skb!\n", __func__); return; } skb->priority = CPL_PRIORITY_CONTROL; @@ -1111,11 +1107,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) tdev->send(tdev, skb); } -static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh, - struct dst_entry *new, struct neighbour *new_neigh, +static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new, + struct neighbour *neigh, const void *daddr) { - struct net_device *olddev, *newdev; + struct net_device *dev; struct tid_info *ti; struct t3cdev *tdev; u32 tid; @@ -1123,29 +1119,17 @@ static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh, struct l2t_entry *e; struct t3c_tid_entry *te; - olddev = old_neigh->dev; - newdev = new_neigh->dev; + dev = neigh->dev; - if (!is_offloading(olddev)) - return; - if (!is_offloading(newdev)) { - printk(KERN_WARNING "%s: Redirect to non-offload " - "device ignored.\n", __func__); + if (!is_offloading(dev)) return; - } - tdev = dev2t3cdev(olddev); + tdev = dev2t3cdev(dev); BUG_ON(!tdev); - if (tdev != dev2t3cdev(newdev)) { - printk(KERN_WARNING "%s: Redirect to different " - "offload device ignored.\n", __func__); - return; - } /* Add new L2T entry */ - e = t3_l2t_get(tdev, new, newdev, daddr); + e = t3_l2t_get(tdev, new, dev, daddr); if (!e) { - printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", - __func__); + pr_err("%s: couldn't allocate new l2t entry!\n", __func__); return; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index dd901c5061b9..9d67eb794c4b 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1278,7 +1278,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) } /* update port statistics */ - if (skb->ip_summed == CHECKSUM_COMPLETE) + if (skb->ip_summed == CHECKSUM_PARTIAL) qs->port_stats[SGE_PSTAT_TX_CSUM]++; if (skb_shinfo(skb)->gso_size) qs->port_stats[SGE_PSTAT_TSO]++; @@ -2130,8 +2130,10 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); - if (cpl->vlan_valid) + if (cpl->vlan_valid) { + qs->port_stats[SGE_PSTAT_VLANEX]++; __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan)); + } napi_gro_frags(&qs->napi); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 3dee68612c9e..c74a898fcd4f 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3725,8 +3725,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, 
memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN); - memcpy(adapter->port[i]->perm_addr, hw_addr, - ETH_ALEN); init_link_config(&p->link_config, p->phy.caps); p->phy.ops->power_down(&p->phy, 1); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c306df7d4568..c6c05bfef0e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4027,8 +4027,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) VFRES_NEQ, VFRES_NETHCTRL, VFRES_NIQFLINT, VFRES_NIQ, VFRES_TC, VFRES_NVI, - FW_PFVF_CMD_CMASK_GET( - FW_PFVF_CMD_CMASK_MASK), + FW_PFVF_CMD_CMASK_MASK, pfvfres_pmask( adapter, pf, vf), VFRES_NEXACTF, @@ -5142,7 +5141,7 @@ static int __init cxgb4_init_module(void) /* Debugfs support is optional, just warn if this fails */ cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); if (!cxgb4_debugfs_root) - pr_warning("could not create debugfs entry, continuing\n"); + pr_warn("could not create debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4_driver); if (ret < 0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 22f3af5166bf..4ce62031f62f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3603,7 +3603,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) p->lport = j; p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); - memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); adap->port[i]->dev_id = j; ret = ntohl(c.u.info.lstatus_to_modtype); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 611396c4b381..68eaa9c88c7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -466,7 +466,6 @@ static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx, u8 hw_addr[]) { memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN); - memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN); } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0188df705719..56b46ab2d4c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -33,6 +33,8 @@ * SOFTWARE. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> @@ -196,11 +198,10 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) break; } - printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n", - dev->name, s, fc); + netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc); } else { netif_carrier_off(dev); - printk(KERN_INFO "%s: link down\n", dev->name); + netdev_info(dev, "link down\n"); } } @@ -2465,8 +2466,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = { static int cxgb4vf_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int version_printed; - int pci_using_dac; int err, pidx; unsigned int pmask; @@ -2478,10 +2477,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Print our driver banner the first time we're called to initialize a * device. 
*/ - if (version_printed == 0) { - printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); - version_printed = 1; - } + pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION); /* * Initialize generic PCI device state. @@ -2920,18 +2916,15 @@ static int __init cxgb4vf_module_init(void) * Vet our module parameters. */ if (msi != MSI_MSIX && msi != MSI_MSI) { - printk(KERN_WARNING KBUILD_MODNAME - ": bad module parameter msi=%d; must be %d" - " (MSI-X or MSI) or %d (MSI)\n", - msi, MSI_MSIX, MSI_MSI); + pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n", + msi, MSI_MSIX, MSI_MSI); return -EINVAL; } /* Debugfs support is optional, just warn if this fails */ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) - printk(KERN_WARNING KBUILD_MODNAME ": could not create" - " debugfs entry, continuing\n"); + pr_warn("could not create debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4vf_driver); if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 92170d50d9d8..9488032d6d2d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1477,8 +1477,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rxq->rspq.idx); - if (pkt->vlan_ex) + if (pkt->vlan_ex) { __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); + rxq->stats.vlan_ex++; + } ret = napi_gro_frags(&rxq->rspq.napi); if (ret == GRO_HELD) @@ -1501,7 +1503,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, const struct pkt_gl *gl) { struct sk_buff *skb; - const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; + const struct cpl_rx_pkt *pkt = (void *)rsp; bool csum_ok = pkt->csum_calc && !pkt->err_vec; struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 78c55213eaf7..354cbb78ed50 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -710,8 +710,8 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 64866ff1aea0..ec1a233622c6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -865,7 +865,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) } memcpy(netdev->dev_addr, addr, netdev->addr_len); - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } @@ -1491,7 +1490,8 @@ static int enic_request_intr(struct enic *enic) for (i = 0; i < enic->rq_count; i++) { intr = enic_msix_rq_intr(enic, i); - sprintf(enic->msix[intr].devname, + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), "%.11s-rx-%d", netdev->name, i); enic->msix[intr].isr = enic_isr_msix_rq; enic->msix[intr].devid = &enic->napi[i]; @@ -1499,20 +1499,23 @@ 
static int enic_request_intr(struct enic *enic) for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); - sprintf(enic->msix[intr].devname, + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), "%.11s-tx-%d", netdev->name, i); enic->msix[intr].isr = enic_isr_msix_wq; enic->msix[intr].devid = enic; } intr = enic_msix_err_intr(enic); - sprintf(enic->msix[intr].devname, + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), "%.11s-err", netdev->name); enic->msix[intr].isr = enic_isr_msix_err; enic->msix[intr].devid = enic; intr = enic_msix_notify_intr(enic); - sprintf(enic->msix[intr].devname, + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), "%.11s-notify", netdev->name); enic->msix[intr].isr = enic_isr_msix_notify; enic->msix[intr].devid = enic; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index c73472c369cd..8cdf02503d13 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -434,9 +434,10 @@ static void dm9000_get_drvinfo(struct net_device *dev, { board_info_t *dm = to_dm9000_board(dev); - strcpy(info->driver, CARDNAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, to_platform_device(dm->dev)->name); + strlcpy(info->driver, CARDNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, to_platform_device(dm->dev)->name, + sizeof(info->bus_info)); } static u32 dm9000_get_msglevel(struct net_device *dev) diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig index 37940279ded8..68262aa57d01 100644 --- a/drivers/net/ethernet/dec/Kconfig +++ b/drivers/net/ethernet/dec/Kconfig @@ -17,21 +17,5 @@ config NET_VENDOR_DEC your specific card in the following questions. if NET_VENDOR_DEC - -config EWRK3 - tristate "EtherWORKS 3 (DE203, DE204, DE205) support" - depends on ISA - select CRC32 - ---help--- - This driver supports the DE203, DE204 and DE205 network (Ethernet) - cards. If this is for you, say Y and read - <file:Documentation/networking/ewrk3.txt> in the kernel source as - well as the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called ewrk3. - source "drivers/net/ethernet/dec/tulip/Kconfig" - endif # NET_VENDOR_DEC diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile index 1b01ed8d42c8..32993fccbbfd 100644 --- a/drivers/net/ethernet/dec/Makefile +++ b/drivers/net/ethernet/dec/Makefile @@ -2,5 +2,4 @@ # Makefile for the Digital Equipment Inc. network device drivers. # -obj-$(CONFIG_EWRK3) += ewrk3.o obj-$(CONFIG_NET_TULIP) += tulip/ diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c deleted file mode 100644 index 9f992b95eddc..000000000000 --- a/drivers/net/ethernet/dec/ewrk3.c +++ /dev/null @@ -1,1961 +0,0 @@ -/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux. - - Written 1994 by David C. Davies. - - Copyright 1994 Digital Equipment Corporation. - - This software may be used and distributed according to the terms of - the GNU General Public License, incorporated herein by reference. 
- - This driver is written for the Digital Equipment Corporation series - of EtherWORKS ethernet cards: - - DE203 Turbo (BNC) - DE204 Turbo (TP) - DE205 Turbo (TP BNC) - - The driver has been tested on a relatively busy network using the DE205 - card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s - (7.8Mb/s) to a DECstation 5000/200. - - The author may be reached at davies@maniac.ultranet.com. - - ========================================================================= - This driver has been written substantially from scratch, although its - inheritance of style and stack interface from 'depca.c' and in turn from - Donald Becker's 'lance.c' should be obvious. - - The DE203/4/5 boards all use a new proprietary chip in place of the - LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422). - Use the depca.c driver in the standard distribution for the LANCE based - cards from DIGITAL; this driver will not work with them. - - The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O - only makes all the card accesses through I/O transactions and no high - (shared) memory is used. This mode provides a >48% performance penalty - and is deprecated in this driver, although allowed to provide initial - setup when hardstrapped. - - The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is - no point in using any mode other than the 2kB mode - their performances - are virtually identical, although the driver has been tested in the 2kB - and 32kB modes. I would suggest you uncomment the line: - - FORCE_2K_MODE; - - to allow the driver to configure the card as a 2kB card at your current - base address, thus leaving more room to clutter your system box with - other memory hungry boards. - - As many ISA and EISA cards can be supported under this driver as you - wish, limited primarily by the available IRQ lines, rather than by the - available I/O addresses (24 ISA, 16 EISA). I have checked different - configurations of multiple depca cards and ewrk3 cards and have not - found a problem yet (provided you have at least depca.c v0.38) ... - - The board IRQ setting must be at an unused IRQ which is auto-probed - using Donald Becker's autoprobe routines. All these cards are at - {5,10,11,15}. - - No 16MB memory limitation should exist with this driver as DMA is not - used and the common memory area is in low memory on the network card (my - current system has 20MB and I've not had problems yet). - - The ability to load this driver as a loadable module has been included - and used extensively during the driver development (to save those long - reboot sequences). To utilise this ability, you have to do 8 things: - - 0) have a copy of the loadable modules code installed on your system. - 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite - temporary directory. - 2) edit the source code near line 1898 to reflect the I/O address and - IRQ you're using. - 3) compile ewrk3.c, but include -DMODULE in the command line to ensure - that the correct bits are compiled (see end of source code). - 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a - kernel with the ewrk3 configuration turned off and reboot. - 5) insmod ewrk3.o - [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y] - [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards] - 6) run the net startup bits for your new eth?? interface manually - (usually /etc/rc.inet[12] at boot time). - 7) enjoy! 
- - Note that autoprobing is not allowed in loadable modules - the system is - already up and running and you're messing with interrupts. - - To unload a module, turn off the associated interface - 'ifconfig eth?? down' then 'rmmod ewrk3'. - - Promiscuous mode has been turned off in this driver, but all the - multicast address bits have been turned on. This improved the send - performance on a busy network by about 13%. - - Ioctl's have now been provided (primarily because I wanted to grab some - packet size statistics). They are patterned after 'plipconfig.c' from a - suggestion by Alan Cox. Using these ioctls, you can enable promiscuous - mode, add/delete multicast addresses, change the hardware address, get - packet size distribution statistics and muck around with the control and - status register. I'll add others if and when the need arises. - - TO DO: - ------ - - - Revision History - ---------------- - - Version Date Description - - 0.1 26-aug-94 Initial writing. ALPHA code release. - 0.11 31-aug-94 Fixed: 2k mode memory base calc., - LeMAC version calc., - IRQ vector assignments during autoprobe. - 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card. - Fixed up MCA hash table algorithm. - 0.20 4-sep-94 Added IOCTL functionality. - 0.21 14-sep-94 Added I/O mode. - 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0. - 0.22 16-sep-94 Added more IOCTLs & tidied up. - 0.23 21-sep-94 Added transmit cut through. - 0.24 31-oct-94 Added uid checks in some ioctls. - 0.30 1-nov-94 BETA code release. - 0.31 5-dec-94 Added check/allocate region code. - 0.32 16-jan-95 Broadcast packet fix. - 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>. - 0.40 27-Dec-95 Rationalise MODULE and autoprobe code. - Rewrite for portability & updated. - ALPHA support from <jestabro@amt.tay1.dec.com> - Added verify_area() calls in ewrk3_ioctl() from - suggestion by <heiko@colossus.escape.de>. - Add new multicasting code. - 0.41 20-Jan-96 Fix IRQ set up problem reported by - <kenneth@bbs.sas.ntu.ac.sg>. 
- 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi> - 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c - 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com> - 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net> - 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com> - 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com> - 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua> - ioctl locking, signature search cleanup <akropel1@rochester.rr.com> - - ========================================================================= - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/crc32.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/ethtool.h> -#include <linux/time.h> -#include <linux/types.h> -#include <linux/unistd.h> -#include <linux/ctype.h> -#include <linux/bitops.h> - -#include <asm/io.h> -#include <asm/dma.h> -#include <asm/uaccess.h> - -#include "ewrk3.h" - -#define DRV_NAME "ewrk3" -#define DRV_VERSION "0.48" - -static char version[] __initdata = -DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n"; - -#ifdef EWRK3_DEBUG -static int ewrk3_debug = EWRK3_DEBUG; -#else -static int ewrk3_debug = 1; -#endif - -#define EWRK3_NDA 0xffe0 /* No Device Address */ - -#define PROBE_LENGTH 32 -#define ETH_PROM_SIG 0xAA5500FFUL - -#ifndef EWRK3_SIGNATURE -#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""} -#define EWRK3_STRLEN 8 -#endif - -#ifndef EWRK3_RAM_BASE_ADDRESSES -#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000} -#endif - -/* - ** Sets up the I/O area for the autoprobe. 
- */ -#define EWRK3_IO_BASE 0x100 /* Start address for probe search */ -#define EWRK3_IOP_INC 0x20 /* I/O address increment */ -#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */ - -#ifndef MAX_NUM_EWRK3S -#define MAX_NUM_EWRK3S 21 -#endif - -#ifndef EWRK3_EISA_IO_PORTS -#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ -#endif - -#ifndef MAX_EISA_SLOTS -#define MAX_EISA_SLOTS 16 -#define EISA_SLOT_INC 0x1000 -#endif - -#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */ - -/* - ** EtherWORKS 3 shared memory window sizes - */ -#define IO_ONLY 0x00 -#define SHMEM_2K 0x800 -#define SHMEM_32K 0x8000 -#define SHMEM_64K 0x10000 - -/* - ** EtherWORKS 3 IRQ ENABLE/DISABLE - */ -#define ENABLE_IRQs { \ - icr |= lp->irq_mask;\ - outb(icr, EWRK3_ICR); /* Enable the IRQs */\ -} - -#define DISABLE_IRQs { \ - icr = inb(EWRK3_ICR);\ - icr &= ~lp->irq_mask;\ - outb(icr, EWRK3_ICR); /* Disable the IRQs */\ -} - -/* - ** EtherWORKS 3 START/STOP - */ -#define START_EWRK3 { \ - csr = inb(EWRK3_CSR);\ - csr &= ~(CSR_TXD|CSR_RXD);\ - outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\ -} - -#define STOP_EWRK3 { \ - csr = (CSR_TXD|CSR_RXD);\ - outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\ -} - -/* - ** The EtherWORKS 3 private structure - */ -#define EWRK3_PKT_STAT_SZ 16 -#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you - increase EWRK3_PKT_STAT_SZ */ - -struct ewrk3_stats { - u32 bins[EWRK3_PKT_STAT_SZ]; - u32 unicast; - u32 multicast; - u32 broadcast; - u32 excessive_collisions; - u32 tx_underruns; - u32 excessive_underruns; -}; - -struct ewrk3_private { - char adapter_name[80]; /* Name exported to /proc/ioports */ - u_long shmem_base; /* Shared memory start address */ - void __iomem *shmem; - u_long shmem_length; /* Shared memory window length */ - struct ewrk3_stats pktStats; /* Private stats counters */ - u_char irq_mask; /* Adapter IRQ mask bits */ - u_char mPage; /* Maximum 2kB Page number */ - u_char lemac; /* Chip rev. 
level */ - u_char hard_strapped; /* Don't allow a full open */ - u_char txc; /* Transmit cut through */ - void __iomem *mctbl; /* Pointer to the multicast table */ - u_char led_mask; /* Used to reserve LED access for ethtool */ - spinlock_t hw_lock; -}; - -/* - ** Force the EtherWORKS 3 card to be in 2kB MODE - */ -#define FORCE_2K_MODE { \ - shmem_length = SHMEM_2K;\ - outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\ -} - -/* - ** Public Functions - */ -static int ewrk3_open(struct net_device *dev); -static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); -static int ewrk3_close(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static const struct ethtool_ops ethtool_ops_203; -static const struct ethtool_ops ethtool_ops; - -/* - ** Private functions - */ -static int ewrk3_hw_init(struct net_device *dev, u_long iobase); -static void ewrk3_init(struct net_device *dev); -static int ewrk3_rx(struct net_device *dev); -static int ewrk3_tx(struct net_device *dev); -static void ewrk3_timeout(struct net_device *dev); - -static void EthwrkSignature(char *name, char *eeprom_image); -static int DevicePresent(u_long iobase); -static void SetMulticastFilter(struct net_device *dev); -static int EISA_signature(char *name, s32 eisa_id); - -static int Read_EEPROM(u_long iobase, u_char eaddr); -static int Write_EEPROM(short data, u_long iobase, u_char eaddr); -static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType); - -static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq); -static int isa_probe(struct net_device *dev, u_long iobase); -static int eisa_probe(struct net_device *dev, u_long iobase); - -static u_char irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12}; - -static char name[EWRK3_STRLEN + 1]; -static int num_ewrks3s; - -/* - ** Miscellaneous defines... 
- */ -#define INIT_EWRK3 {\ - outb(EEPROM_INIT, EWRK3_IOPR);\ - mdelay(1);\ -} - -#ifndef MODULE -struct net_device * __init ewrk3_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private)); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - err = ewrk3_probe1(dev, dev->base_addr, dev->irq); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); - -} -#endif - -static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq) -{ - int err; - - dev->base_addr = iobase; - dev->irq = irq; - - /* Address PROM pattern */ - err = isa_probe(dev, iobase); - if (err != 0) - err = eisa_probe(dev, iobase); - - if (err) - return err; - - err = register_netdev(dev); - if (err) - release_region(dev->base_addr, EWRK3_TOTAL_SIZE); - - return err; -} - -static const struct net_device_ops ewrk3_netdev_ops = { - .ndo_open = ewrk3_open, - .ndo_start_xmit = ewrk3_queue_pkt, - .ndo_stop = ewrk3_close, - .ndo_set_rx_mode = set_multicast_list, - .ndo_do_ioctl = ewrk3_ioctl, - .ndo_tx_timeout = ewrk3_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init -ewrk3_hw_init(struct net_device *dev, u_long iobase) -{ - struct ewrk3_private *lp; - int i, status = 0; - u_long mem_start, shmem_length; - u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0; - u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0; - - /* - ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot. - ** This also disables the EISA_ENABLE bit in the EISA Control Register. - */ - if (iobase > 0x400) - eisa_cr = inb(EISA_CR); - INIT_EWRK3; - - nicsr = inb(EWRK3_CSR); - - icr = inb(EWRK3_ICR); - icr &= 0x70; - outb(icr, EWRK3_ICR); /* Disable all the IRQs */ - - if (nicsr != (CSR_TXD | CSR_RXD)) - return -ENXIO; - - /* Check that the EEPROM is alive and well and not living on Pluto... */ - for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) { - union { - short val; - char c[2]; - } tmp; - - tmp.val = (short) Read_EEPROM(iobase, (i >> 1)); - eeprom_image[i] = tmp.c[0]; - eeprom_image[i + 1] = tmp.c[1]; - chksum += eeprom_image[i] + eeprom_image[i + 1]; - } - - if (chksum != 0) { /* Bad EEPROM Data! 
*/ - printk("%s: Device has a bad on-board EEPROM.\n", dev->name); - return -ENXIO; - } - - EthwrkSignature(name, eeprom_image); - if (*name == '\0') - return -ENXIO; - - dev->base_addr = iobase; - - if (iobase > 0x400) { - outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */ - } - lemac = eeprom_image[EEPROM_CHIPVER]; - cmr = inb(EWRK3_CMR); - - if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) || - ((lemac == LeMAC2) && !(cmr & CMR_HS))) { - printk("%s: %s at %#4lx", dev->name, name, iobase); - hard_strapped = 1; - } else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) { - /* EISA slot address */ - printk("%s: %s at %#4lx (EISA slot %ld)", - dev->name, name, iobase, ((iobase >> 12) & 0x0f)); - } else { /* ISA port address */ - printk("%s: %s at %#4lx", dev->name, name, iobase); - } - - printk(", h/w address "); - if (lemac != LeMAC2) - DevicePresent(iobase); /* need after EWRK3_INIT */ - status = get_hw_addr(dev, eeprom_image, lemac); - printk("%pM\n", dev->dev_addr); - - if (status) { - printk(" which has an EEPROM CRC error.\n"); - return -ENXIO; - } - - if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */ - cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS); - if (eeprom_image[EEPROM_MISC0] & READ_AHEAD) - cmr |= CMR_RA; - if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND) - cmr |= CMR_WB; - if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL) - cmr |= CMR_POLARITY; - if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK) - cmr |= CMR_LINK; - if (eeprom_image[EEPROM_MISC0] & _0WS_ENA) - cmr |= CMR_0WS; - } - if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM) - cmr |= CMR_DRAM; - outb(cmr, EWRK3_CMR); - - cr = inb(EWRK3_CR); /* Set up the Control Register */ - cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD; - if (cr & SETUP_APD) - cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS; - cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS; - cr |= eeprom_image[EEPROM_MISC0] & ENA_16; - outb(cr, EWRK3_CR); - - /* - ** Determine the base address and window length for the EWRK3 - ** RAM from the memory base register. - */ - mem_start = inb(EWRK3_MBR); - shmem_length = 0; - if (mem_start != 0) { - if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) { - mem_start *= SHMEM_64K; - shmem_length = SHMEM_64K; - } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) { - mem_start *= SHMEM_32K; - shmem_length = SHMEM_32K; - } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) { - mem_start = mem_start * SHMEM_2K + 0x80000; - shmem_length = SHMEM_2K; - } else { - return -ENXIO; - } - } - /* - ** See the top of this source code for comments about - ** uncommenting this line. 
- */ -/* FORCE_2K_MODE; */ - - if (hard_strapped) { - printk(" is hard strapped.\n"); - } else if (mem_start) { - printk(" has a %dk RAM window", (int) (shmem_length >> 10)); - printk(" at 0x%.5lx", mem_start); - } else { - printk(" is in I/O only mode"); - } - - lp = netdev_priv(dev); - lp->shmem_base = mem_start; - lp->shmem = ioremap(mem_start, shmem_length); - if (!lp->shmem) - return -ENOMEM; - lp->shmem_length = shmem_length; - lp->lemac = lemac; - lp->hard_strapped = hard_strapped; - lp->led_mask = CR_LED; - spin_lock_init(&lp->hw_lock); - - lp->mPage = 64; - if (cmr & CMR_DRAM) - lp->mPage <<= 1; /* 2 DRAMS on module */ - - sprintf(lp->adapter_name, "%s (%s)", name, dev->name); - - lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM; - - if (!hard_strapped) { - /* - ** Enable EWRK3 board interrupts for autoprobing - */ - icr |= ICR_IE; /* Enable interrupts */ - outb(icr, EWRK3_ICR); - - /* The DMA channel may be passed in on this parameter. */ - dev->dma = 0; - - /* To auto-IRQ we enable the initialization-done and DMA err, - interrupts. For now we will always get a DMA error. */ - if (dev->irq < 2) { -#ifndef MODULE - u_char irqnum; - unsigned long irq_mask; - - - irq_mask = probe_irq_on(); - - /* - ** Trigger a TNE interrupt. - */ - icr |= ICR_TNEM; - outb(1, EWRK3_TDQ); /* Write to the TX done queue */ - outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */ - - irqnum = irq[((icr & IRQ_SEL) >> 4)]; - - mdelay(20); - dev->irq = probe_irq_off(irq_mask); - if ((dev->irq) && (irqnum == dev->irq)) { - printk(" and uses IRQ%d.\n", dev->irq); - } else { - if (!dev->irq) { - printk(" and failed to detect IRQ line.\n"); - } else if ((irqnum == 1) && (lemac == LeMAC2)) { - printk(" and an illegal IRQ line detected.\n"); - } else { - printk(", but incorrect IRQ line detected.\n"); - } - iounmap(lp->shmem); - return -ENXIO; - } - - DISABLE_IRQs; /* Mask all interrupts */ - -#endif /* MODULE */ - } else { - printk(" and requires IRQ%d.\n", dev->irq); - } - } - - if (ewrk3_debug > 1) { - printk(version); - } - /* The EWRK3-specific entries in the device structure. */ - dev->netdev_ops = &ewrk3_netdev_ops; - if (lp->adapter_name[4] == '3') - SET_ETHTOOL_OPS(dev, ðtool_ops_203); - else - SET_ETHTOOL_OPS(dev, ðtool_ops); - dev->watchdog_timeo = QUEUE_PKT_TIMEOUT; - - dev->mem_start = 0; - - return 0; -} - - -static int ewrk3_open(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int status = 0; - u_char icr, csr; - - /* - ** Stop the TX and RX... - */ - STOP_EWRK3; - - if (!lp->hard_strapped) { - if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) { - printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq); - status = -EAGAIN; - } else { - - /* - ** Re-initialize the EWRK3... - */ - ewrk3_init(dev); - - if (ewrk3_debug > 1) { - printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq); - printk(" physical address: %pM\n", dev->dev_addr); - if (lp->shmem_length == 0) { - printk(" no shared memory, I/O only mode\n"); - } else { - printk(" start of shared memory: 0x%08lx\n", lp->shmem_base); - printk(" window length: 0x%04lx\n", lp->shmem_length); - } - printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 
2 : 1)); - printk(" csr: 0x%02x\n", inb(EWRK3_CSR)); - printk(" cr: 0x%02x\n", inb(EWRK3_CR)); - printk(" icr: 0x%02x\n", inb(EWRK3_ICR)); - printk(" cmr: 0x%02x\n", inb(EWRK3_CMR)); - printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC)); - } - netif_start_queue(dev); - /* - ** Unmask EWRK3 board interrupts - */ - icr = inb(EWRK3_ICR); - ENABLE_IRQs; - - } - } else { - printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name); - printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n"); - return -EINVAL; - } - - return status; -} - -/* - ** Initialize the EtherWORKS 3 operating conditions - */ -static void ewrk3_init(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_char csr, page; - u_long iobase = dev->base_addr; - int i; - - /* - ** Enable any multicasts - */ - set_multicast_list(dev); - - /* - ** Set hardware MAC address. Address is initialized from the EEPROM - ** during startup but may have since been changed by the user. - */ - for (i=0; i<ETH_ALEN; i++) - outb(dev->dev_addr[i], EWRK3_PAR0 + i); - - /* - ** Clean out any remaining entries in all the queues here - */ - while (inb(EWRK3_TQ)); - while (inb(EWRK3_TDQ)); - while (inb(EWRK3_RQ)); - while (inb(EWRK3_FMQ)); - - /* - ** Write a clean free memory queue - */ - for (page = 1; page < lp->mPage; page++) { /* Write the free page numbers */ - outb(page, EWRK3_FMQ); /* to the Free Memory Queue */ - } - - START_EWRK3; /* Enable the TX and/or RX */ -} - -/* - * Transmit timeout - */ - -static void ewrk3_timeout(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_char icr, csr; - u_long iobase = dev->base_addr; - - if (!lp->hard_strapped) - { - printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n", - dev->name, inb(EWRK3_CSR)); - - /* - ** Mask all board interrupts - */ - DISABLE_IRQs; - - /* - ** Stop the TX and RX... 
- */ - STOP_EWRK3; - - ewrk3_init(dev); - - /* - ** Unmask EWRK3 board interrupts - */ - ENABLE_IRQs; - - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); - } -} - -/* - ** Writes a socket buffer to the free page queue - */ -static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - void __iomem *buf = NULL; - u_char icr; - u_char page; - - spin_lock_irq (&lp->hw_lock); - DISABLE_IRQs; - - /* if no resources available, exit, request packet be queued */ - if (inb (EWRK3_FMQC) == 0) { - printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n", - dev->name); - printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n", - dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR), - inb (EWRK3_FMQC)); - goto err_out; - } - - /* - ** Get a free page from the FMQ - */ - if ((page = inb (EWRK3_FMQ)) >= lp->mPage) { - printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n", - (u_char) page); - goto err_out; - } - - - /* - ** Set up shared memory window and pointer into the window - */ - if (lp->shmem_length == IO_ONLY) { - outb (page, EWRK3_IOPR); - } else if (lp->shmem_length == SHMEM_2K) { - buf = lp->shmem; - outb (page, EWRK3_MPR); - } else if (lp->shmem_length == SHMEM_32K) { - buf = (((short) page << 11) & 0x7800) + lp->shmem; - outb ((page >> 4), EWRK3_MPR); - } else if (lp->shmem_length == SHMEM_64K) { - buf = (((short) page << 11) & 0xf800) + lp->shmem; - outb ((page >> 5), EWRK3_MPR); - } else { - printk (KERN_ERR "%s: Oops - your private data area is hosed!\n", - dev->name); - BUG (); - } - - /* - ** Set up the buffer control structures and copy the data from - ** the socket buffer to the shared memory . - */ - if (lp->shmem_length == IO_ONLY) { - int i; - u_char *p = skb->data; - outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA); - outb ((char) (skb->len & 0xff), EWRK3_DATA); - outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA); - outb ((char) 0x04, EWRK3_DATA); - for (i = 0; i < skb->len; i++) { - outb (*p++, EWRK3_DATA); - } - outb (page, EWRK3_TQ); /* Start sending pkt */ - } else { - writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf); /* ctrl byte */ - buf += 1; - writeb ((char) (skb->len & 0xff), buf); /* length (16 bit xfer) */ - buf += 1; - if (lp->txc) { - writeb(((skb->len >> 8) & 0xff) | XCT, buf); - buf += 1; - writeb (0x04, buf); /* index byte */ - buf += 1; - writeb (0x00, (buf + skb->len)); /* Write the XCT flag */ - memcpy_toio (buf, skb->data, PRELOAD); /* Write PRELOAD bytes */ - outb (page, EWRK3_TQ); /* Start sending pkt */ - memcpy_toio (buf + PRELOAD, - skb->data + PRELOAD, - skb->len - PRELOAD); - writeb (0xff, (buf + skb->len)); /* Write the XCT flag */ - } else { - writeb ((skb->len >> 8) & 0xff, buf); - buf += 1; - writeb (0x04, buf); /* index byte */ - buf += 1; - memcpy_toio (buf, skb->data, skb->len); /* Write data bytes */ - outb (page, EWRK3_TQ); /* Start sending pkt */ - } - } - - ENABLE_IRQs; - spin_unlock_irq (&lp->hw_lock); - - dev->stats.tx_bytes += skb->len; - dev_kfree_skb (skb); - - /* Check for free resources: stop Tx queue if there are none */ - if (inb (EWRK3_FMQC) == 0) - netif_stop_queue (dev); - - return NETDEV_TX_OK; - -err_out: - ENABLE_IRQs; - spin_unlock_irq (&lp->hw_lock); - return NETDEV_TX_BUSY; -} - -/* - ** The EWRK3 interrupt handler. 
- */ -static irqreturn_t ewrk3_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct ewrk3_private *lp; - u_long iobase; - u_char icr, cr, csr; - - lp = netdev_priv(dev); - iobase = dev->base_addr; - - /* get the interrupt information */ - csr = inb(EWRK3_CSR); - - /* - ** Mask the EWRK3 board interrupts and turn on the LED - */ - spin_lock(&lp->hw_lock); - DISABLE_IRQs; - - cr = inb(EWRK3_CR); - cr |= lp->led_mask; - outb(cr, EWRK3_CR); - - if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */ - ewrk3_rx(dev); - - if (csr & CSR_TNE) /* Tx interrupt (packet sent) */ - ewrk3_tx(dev); - - /* - ** Now deal with the TX/RX disable flags. These are set when there - ** are no more resources. If resources free up then enable these - ** interrupts, otherwise mask them - failure to do this will result - ** in the system hanging in an interrupt loop. - */ - if (inb(EWRK3_FMQC)) { /* any resources available? */ - lp->irq_mask |= ICR_TXDM | ICR_RXDM; /* enable the interrupt source */ - csr &= ~(CSR_TXD | CSR_RXD); /* ensure restart of a stalled TX or RX */ - outb(csr, EWRK3_CSR); - netif_wake_queue(dev); - } else { - lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM); /* disable the interrupt source */ - } - - /* Unmask the EWRK3 board interrupts and turn off the LED */ - cr &= ~(lp->led_mask); - outb(cr, EWRK3_CR); - ENABLE_IRQs; - spin_unlock(&lp->hw_lock); - return IRQ_HANDLED; -} - -/* Called with lp->hw_lock held */ -static int ewrk3_rx(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int i, status = 0; - u_char page; - void __iomem *buf = NULL; - - while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */ - if ((page = inb(EWRK3_RQ)) < lp->mPage) { /* Get next entry's buffer page */ - /* - ** Set up shared memory window and pointer into the window - */ - if (lp->shmem_length == IO_ONLY) { - outb(page, EWRK3_IOPR); - } else if (lp->shmem_length == SHMEM_2K) { - buf = lp->shmem; - outb(page, EWRK3_MPR); - } else if (lp->shmem_length == SHMEM_32K) { - buf = (((short) page << 11) & 0x7800) + lp->shmem; - outb((page >> 4), EWRK3_MPR); - } else if (lp->shmem_length == SHMEM_64K) { - buf = (((short) page << 11) & 0xf800) + lp->shmem; - outb((page >> 5), EWRK3_MPR); - } else { - status = -1; - printk("%s: Oops - your private data area is hosed!\n", dev->name); - } - - if (!status) { - char rx_status; - int pkt_len; - - if (lp->shmem_length == IO_ONLY) { - rx_status = inb(EWRK3_DATA); - pkt_len = inb(EWRK3_DATA); - pkt_len |= ((u_short) inb(EWRK3_DATA) << 8); - } else { - rx_status = readb(buf); - buf += 1; - pkt_len = readw(buf); - buf += 3; - } - - if (!(rx_status & R_ROK)) { /* There was an error. */ - dev->stats.rx_errors++; /* Update the error stats. 
*/ - if (rx_status & R_DBE) - dev->stats.rx_frame_errors++; - if (rx_status & R_CRC) - dev->stats.rx_crc_errors++; - if (rx_status & R_PLL) - dev->stats.rx_fifo_errors++; - } else { - struct sk_buff *skb; - skb = netdev_alloc_skb(dev, - pkt_len + 2); - - if (skb != NULL) { - unsigned char *p; - skb_reserve(skb, 2); /* Align to 16 bytes */ - p = skb_put(skb, pkt_len); - - if (lp->shmem_length == IO_ONLY) { - *p = inb(EWRK3_DATA); /* dummy read */ - for (i = 0; i < pkt_len; i++) { - *p++ = inb(EWRK3_DATA); - } - } else { - memcpy_fromio(p, buf, pkt_len); - } - - for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) { - if (pkt_len < i * EWRK3_PKT_BIN_SZ) { - lp->pktStats.bins[i]++; - i = EWRK3_PKT_STAT_SZ; - } - } - p = skb->data; /* Look at the dest addr */ - if (is_multicast_ether_addr(p)) { - if (is_broadcast_ether_addr(p)) { - lp->pktStats.broadcast++; - } else { - lp->pktStats.multicast++; - } - } else if (ether_addr_equal(p, - dev->dev_addr)) { - lp->pktStats.unicast++; - } - lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ - if (lp->pktStats.bins[0] == 0) { /* Reset counters */ - memset(&lp->pktStats, 0, sizeof(lp->pktStats)); - } - /* - ** Notify the upper protocol layers that there is another - ** packet to handle - */ - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - - /* - ** Update stats - */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } else { - printk("%s: Insufficient memory; nuking packet.\n", dev->name); - dev->stats.rx_dropped++; /* Really, deferred. */ - break; - } - } - } - /* - ** Return the received buffer to the free memory queue - */ - outb(page, EWRK3_FMQ); - } else { - printk("ewrk3_rx(): Illegal page number, page %d\n", page); - printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC)); - } - } - return status; -} - -/* -** Buffer sent - check for TX buffer errors. -** Called with lp->hw_lock held -*/ -static int ewrk3_tx(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - u_char tx_status; - - while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */ - if (tx_status & T_VSTS) { /* The status is valid */ - if (tx_status & T_TXE) { - dev->stats.tx_errors++; - if (tx_status & T_NCL) - dev->stats.tx_carrier_errors++; - if (tx_status & T_LCL) - dev->stats.tx_window_errors++; - if (tx_status & T_CTU) { - if ((tx_status & T_COLL) ^ T_XUR) { - lp->pktStats.tx_underruns++; - } else { - lp->pktStats.excessive_underruns++; - } - } else if (tx_status & T_COLL) { - if ((tx_status & T_COLL) ^ T_XCOLL) { - dev->stats.collisions++; - } else { - lp->pktStats.excessive_collisions++; - } - } - } else { - dev->stats.tx_packets++; - } - } - } - - return 0; -} - -static int ewrk3_close(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - u_char icr, csr; - - netif_stop_queue(dev); - - if (ewrk3_debug > 1) { - printk("%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, inb(EWRK3_CSR)); - } - /* - ** We stop the EWRK3 here... mask interrupts and stop TX & RX - */ - DISABLE_IRQs; - - STOP_EWRK3; - - /* - ** Clean out the TX and RX queues here (note that one entry - ** may get added to either the TXD or RX queues if the TX or RX - ** just starts processing a packet before the STOP_EWRK3 command - ** is received. This will be flushed in the ewrk3_open() call). 
- */ - while (inb(EWRK3_TQ)); - while (inb(EWRK3_TDQ)); - while (inb(EWRK3_RQ)); - - if (!lp->hard_strapped) { - free_irq(dev->irq, dev); - } - return 0; -} - -/* - ** Set or clear the multicast filter for this adapter. - */ -static void set_multicast_list(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - u_char csr; - - csr = inb(EWRK3_CSR); - - if (lp->shmem_length == IO_ONLY) { - lp->mctbl = NULL; - } else { - lp->mctbl = lp->shmem + PAGE0_HTE; - } - - csr &= ~(CSR_PME | CSR_MCE); - if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ - csr |= CSR_PME; - outb(csr, EWRK3_CSR); - } else { - SetMulticastFilter(dev); - csr |= CSR_MCE; - outb(csr, EWRK3_CSR); - } -} - -/* - ** Calculate the hash code and update the logical address filter - ** from a list of ethernet multicast addresses. - ** Little endian crc one liner from Matt Thomas, DEC. - ** - ** Note that when clearing the table, the broadcast bit must remain asserted - ** to receive broadcast messages. - */ -static void SetMulticastFilter(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - struct netdev_hw_addr *ha; - u_long iobase = dev->base_addr; - int i; - char bit, byte; - short __iomem *p = lp->mctbl; - u16 hashcode; - u32 crc; - - spin_lock_irq(&lp->hw_lock); - - if (lp->shmem_length == IO_ONLY) { - outb(0, EWRK3_IOPR); - outw(PAGE0_HTE, EWRK3_PIR1); - } else { - outb(0, EWRK3_MPR); - } - - if (dev->flags & IFF_ALLMULTI) { - for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { - if (lp->shmem_length == IO_ONLY) { - outb(0xff, EWRK3_DATA); - } else { /* memset didn't work here */ - writew(0xffff, p); - p++; - i++; - } - } - } else { - /* Clear table except for broadcast bit */ - if (lp->shmem_length == IO_ONLY) { - for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) { - outb(0x00, EWRK3_DATA); - } - outb(0x80, EWRK3_DATA); - i++; /* insert the broadcast bit */ - for (; i < (HASH_TABLE_LEN >> 3); i++) { - outb(0x00, EWRK3_DATA); - } - } else { - memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3); - writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1); - } - - /* Update table */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */ - - byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ - bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */ - - if (lp->shmem_length == IO_ONLY) { - u_char tmp; - - outw(PAGE0_HTE + byte, EWRK3_PIR1); - tmp = inb(EWRK3_DATA); - tmp |= bit; - outw(PAGE0_HTE + byte, EWRK3_PIR1); - outb(tmp, EWRK3_DATA); - } else { - writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte); - } - } - } - - spin_unlock_irq(&lp->hw_lock); -} - -/* - ** ISA bus I/O device probe - */ -static int __init isa_probe(struct net_device *dev, u_long ioaddr) -{ - int i = num_ewrks3s, maxSlots; - int ret = -ENODEV; - - u_long iobase; - - if (ioaddr >= 0x400) - goto out; - - if (ioaddr == 0) { /* Autoprobing */ - iobase = EWRK3_IO_BASE; /* Get the first slot address */ - maxSlots = 24; - } else { /* Probe a specific location */ - iobase = ioaddr; - maxSlots = i + 1; - } - - for (; (i < maxSlots) && (dev != NULL); - iobase += EWRK3_IOP_INC, i++) - { - if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) { - if (DevicePresent(iobase) == 0) { - int irq = dev->irq; - ret = ewrk3_hw_init(dev, iobase); - if (!ret) - break; - dev->irq = irq; - } - release_region(iobase, EWRK3_TOTAL_SIZE); - } - } - out: - - return ret; -} - -/* - ** EISA bus I/O device probe. 
Probe from slot 1 since slot 0 is usually - ** the motherboard. - */ -static int __init eisa_probe(struct net_device *dev, u_long ioaddr) -{ - int i, maxSlots; - u_long iobase; - int ret = -ENODEV; - - if (ioaddr < 0x1000) - goto out; - - iobase = ioaddr; - i = (ioaddr >> 12); - maxSlots = i + 1; - - for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) { - if (EISA_signature(name, EISA_ID) == 0) { - if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME) && - DevicePresent(iobase) == 0) { - int irq = dev->irq; - ret = ewrk3_hw_init(dev, iobase); - if (!ret) - break; - dev->irq = irq; - } - release_region(iobase, EWRK3_TOTAL_SIZE); - } - } - - out: - return ret; -} - - -/* - ** Read the EWRK3 EEPROM using this routine - */ -static int Read_EEPROM(u_long iobase, u_char eaddr) -{ - int i; - - outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */ - outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */ - for (i = 0; i < 5000; i++) - inb(EWRK3_CSR); /* wait 1msec */ - - return inw(EWRK3_EPROM1); /* 16 bits data return */ -} - -/* - ** Write the EWRK3 EEPROM using this routine - */ -static int Write_EEPROM(short data, u_long iobase, u_char eaddr) -{ - int i; - - outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */ - for (i = 0; i < 5000; i++) - inb(EWRK3_CSR); /* wait 1msec */ - outw(data, EWRK3_EPROM1); /* write data to register */ - outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */ - outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */ - for (i = 0; i < 75000; i++) - inb(EWRK3_CSR); /* wait 15msec */ - outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */ - for (i = 0; i < 5000; i++) - inb(EWRK3_CSR); /* wait 1msec */ - - return 0; -} - -/* - ** Look for a particular board name in the on-board EEPROM. - */ -static void __init EthwrkSignature(char *name, char *eeprom_image) -{ - int i; - char *signatures[] = EWRK3_SIGNATURE; - - for (i=0; *signatures[i] != '\0'; i++) - if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) ) - break; - - if (*signatures[i] != '\0') { - memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN); - name[EWRK3_STRLEN] = '\0'; - } else - name[0] = '\0'; -} - -/* - ** Look for a special sequence in the Ethernet station address PROM that - ** is common across all EWRK3 products. - ** - ** Search the Ethernet address ROM for the signature. Since the ROM address - ** counter can start at an arbitrary point, the search must include the entire - ** probe sequence length plus the (length_of_the_signature - 1). - ** Stop the search IMMEDIATELY after the signature is found so that the - ** PROM address counter is correctly positioned at the start of the - ** ethernet address for later read out. 
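Read_EEPROM() above shows the board's indirect EEPROM access: latch a 6-bit word address, issue the read command through the I/O Page Register, wait, then read a 16-bit word back. A sketch of that sequence, assuming the EWRK3_* register macros and EEPROM_RD command from ewrk3.h (removed further down in this patch) are in scope; they expand relative to a local variable named iobase. The original burns the wait with ~5000 dummy inb(EWRK3_CSR) reads (roughly 1 ms of ISA cycles); mdelay() is substituted here for clarity.

#include <linux/types.h>
#include <linux/delay.h>
#include <asm/io.h>

static u16 eeprom_read_word(unsigned long iobase, u8 word_addr)
{
	outb(word_addr & 0x3f, EWRK3_PIR1);	/* 6-bit EEPROM word address */
	outb(EEPROM_RD, EWRK3_IOPR);		/* issue the read command */
	mdelay(1);				/* give the EEPROM time to respond */
	return inw(EWRK3_EPROM1);		/* 16 bits of data */
}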
- */ - -static int __init DevicePresent(u_long iobase) -{ - union { - struct { - u32 a; - u32 b; - } llsig; - char Sig[sizeof(u32) << 1]; - } - dev; - short sigLength; - char data; - int i, j, status = 0; - - dev.llsig.a = ETH_PROM_SIG; - dev.llsig.b = ETH_PROM_SIG; - sigLength = sizeof(u32) << 1; - - for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) { - data = inb(EWRK3_APROM); - if (dev.Sig[j] == data) { /* track signature */ - j++; - } else { /* lost signature; begin search again */ - if (data == dev.Sig[0]) { - j = 1; - } else { - j = 0; - } - } - } - - if (j != sigLength) { - status = -ENODEV; /* search failed */ - } - return status; -} - -static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType) -{ - int i, j, k; - u_short chksum; - u_char crc, lfsr, sd, status = 0; - u_long iobase = dev->base_addr; - u16 tmp; - - if (chipType == LeMAC2) { - for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) { - sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j]; - outb(dev->dev_addr[j], EWRK3_PAR0 + j); - for (k = 0; k < 8; k++, sd >>= 1) { - lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7; - crc = (crc >> 1) + lfsr; - } - } - if (crc != eeprom_image[EEPROM_PA_CRC]) - status = -1; - } else { - for (i = 0, k = 0; i < ETH_ALEN;) { - k <<= 1; - if (k > 0xffff) - k -= 0xffff; - - k += (u_char) (tmp = inb(EWRK3_APROM)); - dev->dev_addr[i] = (u_char) tmp; - outb(dev->dev_addr[i], EWRK3_PAR0 + i); - i++; - k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8); - dev->dev_addr[i] = (u_char) tmp; - outb(dev->dev_addr[i], EWRK3_PAR0 + i); - i++; - - if (k > 0xffff) - k -= 0xffff; - } - if (k == 0xffff) - k = 0; - chksum = inb(EWRK3_APROM); - chksum |= (inb(EWRK3_APROM) << 8); - if (k != chksum) - status = -1; - } - - return status; -} - -/* - ** Look for a particular board name in the EISA configuration space - */ -static int __init EISA_signature(char *name, s32 eisa_id) -{ - u_long i; - char *signatures[] = EWRK3_SIGNATURE; - char ManCode[EWRK3_STRLEN]; - union { - s32 ID; - char Id[4]; - } Eisa; - int status = 0; - - *name = '\0'; - for (i = 0; i < 4; i++) { - Eisa.Id[i] = inb(eisa_id + i); - } - - ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40); - ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40); - ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30); - ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30); - ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30); - ManCode[5] = '\0'; - - for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) { - if (strstr(ManCode, signatures[i]) != NULL) { - strcpy(name, ManCode); - status = 1; - } - } - - return status; /* return the device name string */ -} - -static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL); - - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->fw_version, "%d", fwrev); - strcpy(info->bus_info, "N/A"); - info->eedump_len = EEPROM_MAX; -} - -static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct ewrk3_private *lp = netdev_priv(dev); - unsigned long iobase = dev->base_addr; - u8 cr = inb(EWRK3_CR); - - switch (lp->adapter_name[4]) { - case '3': /* DE203 */ - ecmd->supported = SUPPORTED_BNC; - ecmd->port = PORT_BNC; - break; - - case '4': /* DE204 */ - ecmd->supported = SUPPORTED_TP; - ecmd->port = PORT_TP; - break; - - case '5': /* DE205 */ - ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI; 
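EISA_signature() above rebuilds a printable board code from the compressed EISA ID registers: two 5-bit letters packed into the first ID word plus three hex digits of the product number. A standalone sketch of just that decode, with sample ID bytes chosen so the result reads "DE205" (the bytes are illustrative, not read from hardware):

#include <stdio.h>

int main(void)
{
	unsigned char id[4] = { 0x10, 0xa3, 0x20, 0x50 };
	char man[6];

	man[0] = ((id[0] >> 2) & 0x1f) + 0x40;				/* 1st letter */
	man[1] = ((id[1] & 0xe0) >> 5) + ((id[0] & 0x03) << 3) + 0x40;	/* 2nd letter */
	man[2] = ((id[2] >> 4) & 0x0f) + 0x30;				/* product digits */
	man[3] = (id[2] & 0x0f) + 0x30;
	man[4] = ((id[3] >> 4) & 0x0f) + 0x30;
	man[5] = '\0';

	printf("decoded board code: %s\n", man);	/* prints "DE205" */
	return 0;
}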
- ecmd->autoneg = !(cr & CR_APD); - /* - ** Port is only valid if autoneg is disabled - ** and even then we don't know if AUI is jumpered. - */ - if (!ecmd->autoneg) - ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP; - break; - } - - ecmd->supported |= SUPPORTED_10baseT_Half; - ethtool_cmd_speed_set(ecmd, SPEED_10); - ecmd->duplex = DUPLEX_HALF; - return 0; -} - -static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct ewrk3_private *lp = netdev_priv(dev); - unsigned long iobase = dev->base_addr; - unsigned long flags; - u8 cr; - - /* DE205 is the only card with anything to set */ - if (lp->adapter_name[4] != '5') - return -EOPNOTSUPP; - - /* Sanity-check parameters */ - if (ecmd->speed != SPEED_10) - return -EINVAL; - if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC) - return -EINVAL; /* AUI is not software-selectable */ - if (ecmd->transceiver != XCVR_INTERNAL) - return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF) - return -EINVAL; - if (ecmd->phy_address != 0) - return -EINVAL; - - spin_lock_irqsave(&lp->hw_lock, flags); - cr = inb(EWRK3_CR); - - /* If Autoneg is set, change to Auto Port mode */ - /* Otherwise, disable Auto Port and set port explicitly */ - if (ecmd->autoneg) { - cr &= ~CR_APD; - } else { - cr |= CR_APD; - if (ecmd->port == PORT_TP) - cr &= ~CR_PSEL; /* Force TP */ - else - cr |= CR_PSEL; /* Force BNC */ - } - - /* Commit the changes */ - outb(cr, EWRK3_CR); - spin_unlock_irqrestore(&lp->hw_lock, flags); - return 0; -} - -static u32 ewrk3_get_link(struct net_device *dev) -{ - unsigned long iobase = dev->base_addr; - u8 cmr = inb(EWRK3_CMR); - /* DE203 has BNC only and link status does not apply */ - /* On DE204 this is always valid since TP is the only port. */ - /* On DE205 this reflects TP status even if BNC or AUI is selected. */ - return !(cmr & CMR_LINK); -} - -static int ewrk3_set_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - struct ewrk3_private *lp = netdev_priv(dev); - unsigned long iobase = dev->base_addr; - u8 cr; - - spin_lock_irq(&lp->hw_lock); - - switch (state) { - case ETHTOOL_ID_ACTIVE: - /* Prevent ISR from twiddling the LED */ - lp->led_mask = 0; - spin_unlock_irq(&lp->hw_lock); - return 2; /* cycle on/off twice per second */ - - case ETHTOOL_ID_ON: - cr = inb(EWRK3_CR); - outb(cr | CR_LED, EWRK3_CR); - break; - - case ETHTOOL_ID_OFF: - cr = inb(EWRK3_CR); - outb(cr & ~CR_LED, EWRK3_CR); - break; - - case ETHTOOL_ID_INACTIVE: - lp->led_mask = CR_LED; - cr = inb(EWRK3_CR); - outb(cr & ~CR_LED, EWRK3_CR); - } - spin_unlock_irq(&lp->hw_lock); - - return 0; -} - -static const struct ethtool_ops ethtool_ops_203 = { - .get_drvinfo = ewrk3_get_drvinfo, - .get_settings = ewrk3_get_settings, - .set_settings = ewrk3_set_settings, - .set_phys_id = ewrk3_set_phys_id, -}; - -static const struct ethtool_ops ethtool_ops = { - .get_drvinfo = ewrk3_get_drvinfo, - .get_settings = ewrk3_get_settings, - .set_settings = ewrk3_set_settings, - .get_link = ewrk3_get_link, - .set_phys_id = ewrk3_set_phys_id, -}; - -/* - ** Perform IOCTL call functions here. Some are privileged operations and the - ** effective uid is checked in those cases. 
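ewrk3_get_link() above reports link by inverting the active-low CMR_LINK bit, and user space reaches it through the standard ETHTOOL_GLINK ioctl. A user-space sketch of that query, assuming an interface named "eth0" (the name is only an example):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;		/* ethtool request block */

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("link is %s\n", ev.data ? "up" : "down");
	close(fd);
	return 0;
}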
- */ -static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct ewrk3_private *lp = netdev_priv(dev); - struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru; - u_long iobase = dev->base_addr; - int i, j, status = 0; - u_char csr; - unsigned long flags; - union ewrk3_addr { - u_char addr[HASH_TABLE_LEN * ETH_ALEN]; - u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1]; - }; - - union ewrk3_addr *tmp; - - /* All we handle are private IOCTLs */ - if (cmd != EWRK3IOCTL) - return -EOPNOTSUPP; - - tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL); - if(tmp==NULL) - return -ENOMEM; - - switch (ioc->cmd) { - case EWRK3_GET_HWADDR: /* Get the hardware address */ - for (i = 0; i < ETH_ALEN; i++) { - tmp->addr[i] = dev->dev_addr[i]; - } - ioc->len = ETH_ALEN; - if (copy_to_user(ioc->data, tmp->addr, ioc->len)) - status = -EFAULT; - break; - - case EWRK3_SET_HWADDR: /* Set the hardware address */ - if (capable(CAP_NET_ADMIN)) { - spin_lock_irqsave(&lp->hw_lock, flags); - csr = inb(EWRK3_CSR); - csr |= (CSR_TXD | CSR_RXD); - outb(csr, EWRK3_CSR); /* Disable the TX and RX */ - spin_unlock_irqrestore(&lp->hw_lock, flags); - - if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) { - status = -EFAULT; - break; - } - spin_lock_irqsave(&lp->hw_lock, flags); - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = tmp->addr[i]; - outb(tmp->addr[i], EWRK3_PAR0 + i); - } - - csr = inb(EWRK3_CSR); - csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */ - outb(csr, EWRK3_CSR); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - status = -EPERM; - } - - break; - case EWRK3_SET_PROM: /* Set Promiscuous Mode */ - if (capable(CAP_NET_ADMIN)) { - spin_lock_irqsave(&lp->hw_lock, flags); - csr = inb(EWRK3_CSR); - csr |= CSR_PME; - csr &= ~CSR_MCE; - outb(csr, EWRK3_CSR); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - status = -EPERM; - } - - break; - case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */ - if (capable(CAP_NET_ADMIN)) { - spin_lock_irqsave(&lp->hw_lock, flags); - csr = inb(EWRK3_CSR); - csr &= ~CSR_PME; - outb(csr, EWRK3_CSR); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - status = -EPERM; - } - - break; - case EWRK3_GET_MCA: /* Get the multicast address table */ - spin_lock_irqsave(&lp->hw_lock, flags); - if (lp->shmem_length == IO_ONLY) { - outb(0, EWRK3_IOPR); - outw(PAGE0_HTE, EWRK3_PIR1); - for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { - tmp->addr[i] = inb(EWRK3_DATA); - } - } else { - outb(0, EWRK3_MPR); - memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3)); - } - spin_unlock_irqrestore(&lp->hw_lock, flags); - - ioc->len = (HASH_TABLE_LEN >> 3); - if (copy_to_user(ioc->data, tmp->addr, ioc->len)) - status = -EFAULT; - - break; - case EWRK3_SET_MCA: /* Set a multicast address */ - if (capable(CAP_NET_ADMIN)) { - if (ioc->len > HASH_TABLE_LEN) { - status = -EINVAL; - break; - } - if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) { - status = -EFAULT; - break; - } - set_multicast_list(dev); - } else { - status = -EPERM; - } - - break; - case EWRK3_CLR_MCA: /* Clear all multicast addresses */ - if (capable(CAP_NET_ADMIN)) { - set_multicast_list(dev); - } else { - status = -EPERM; - } - - break; - case EWRK3_MCA_EN: /* Enable multicast addressing */ - if (capable(CAP_NET_ADMIN)) { - spin_lock_irqsave(&lp->hw_lock, flags); - csr = inb(EWRK3_CSR); - csr |= CSR_MCE; - csr &= ~CSR_PME; - outb(csr, EWRK3_CSR); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - status = -EPERM; - } - - break; - case 
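The ioctl handler above follows a common legacy pattern: the whole request block rides inside the ifreq union, privileged sub-commands are gated on CAP_NET_ADMIN, and user memory is only touched through copy_to_user()/copy_from_user(). A condensed sketch of that skeleton; my_ioctl_req, my_do_ioctl and the command numbers are illustrative names, not driver API:

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/sockios.h>
#include <linux/uaccess.h>

struct my_ioctl_req {
	unsigned short cmd;		/* sub-command */
	unsigned short len;		/* length of the user buffer */
	unsigned char __user *data;	/* user buffer */
};

static int my_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct my_ioctl_req *ioc = (struct my_ioctl_req *)&rq->ifr_ifru;

	if (cmd != SIOCDEVPRIVATE)	/* only the private ioctl is handled */
		return -EOPNOTSUPP;

	switch (ioc->cmd) {
	case 0x01:			/* read-only: report the MAC address */
		ioc->len = ETH_ALEN;
		if (copy_to_user(ioc->data, dev->dev_addr, ETH_ALEN))
			return -EFAULT;
		return 0;
	case 0x02:			/* a privileged write command */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* copy_from_user() and poke the hardware here */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}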
EWRK3_GET_STATS: { /* Get the driver statistics */ - struct ewrk3_stats *tmp_stats = - kmalloc(sizeof(lp->pktStats), GFP_KERNEL); - if (!tmp_stats) { - status = -ENOMEM; - break; - } - - spin_lock_irqsave(&lp->hw_lock, flags); - memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats)); - spin_unlock_irqrestore(&lp->hw_lock, flags); - - ioc->len = sizeof(lp->pktStats); - if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats))) - status = -EFAULT; - kfree(tmp_stats); - break; - } - case EWRK3_CLR_STATS: /* Zero out the driver statistics */ - if (capable(CAP_NET_ADMIN)) { - spin_lock_irqsave(&lp->hw_lock, flags); - memset(&lp->pktStats, 0, sizeof(lp->pktStats)); - spin_unlock_irqrestore(&lp->hw_lock,flags); - } else { - status = -EPERM; - } - - break; - case EWRK3_GET_CSR: /* Get the CSR Register contents */ - tmp->addr[0] = inb(EWRK3_CSR); - ioc->len = 1; - if (copy_to_user(ioc->data, tmp->addr, ioc->len)) - status = -EFAULT; - break; - case EWRK3_SET_CSR: /* Set the CSR Register contents */ - if (capable(CAP_NET_ADMIN)) { - if (copy_from_user(tmp->addr, ioc->data, 1)) { - status = -EFAULT; - break; - } - outb(tmp->addr[0], EWRK3_CSR); - } else { - status = -EPERM; - } - - break; - case EWRK3_GET_EEPROM: /* Get the EEPROM contents */ - if (capable(CAP_NET_ADMIN)) { - for (i = 0; i < (EEPROM_MAX >> 1); i++) { - tmp->val[i] = (short) Read_EEPROM(iobase, i); - } - i = EEPROM_MAX; - tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */ - for (j = 0; j < ETH_ALEN; j++) { - tmp->addr[i++] = inb(EWRK3_PAR0 + j); - } - ioc->len = EEPROM_MAX + 1 + ETH_ALEN; - if (copy_to_user(ioc->data, tmp->addr, ioc->len)) - status = -EFAULT; - } else { - status = -EPERM; - } - - break; - case EWRK3_SET_EEPROM: /* Set the EEPROM contents */ - if (capable(CAP_NET_ADMIN)) { - if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) { - status = -EFAULT; - break; - } - for (i = 0; i < (EEPROM_MAX >> 1); i++) { - Write_EEPROM(tmp->val[i], iobase, i); - } - } else { - status = -EPERM; - } - - break; - case EWRK3_GET_CMR: /* Get the CMR Register contents */ - tmp->addr[0] = inb(EWRK3_CMR); - ioc->len = 1; - if (copy_to_user(ioc->data, tmp->addr, ioc->len)) - status = -EFAULT; - break; - case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */ - if (capable(CAP_NET_ADMIN)) { - lp->txc = 1; - } else { - status = -EPERM; - } - - break; - case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */ - if (capable(CAP_NET_ADMIN)) { - lp->txc = 0; - } else { - status = -EPERM; - } - - break; - default: - status = -EOPNOTSUPP; - } - kfree(tmp); - return status; -} - -#ifdef MODULE -static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S]; -static int ndevs; -static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, }; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, byte, NULL, 0); -MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)"); - -static __exit void ewrk3_exit_module(void) -{ - int i; - - for( i=0; i<ndevs; i++ ) { - struct net_device *dev = ewrk3_devs[i]; - struct ewrk3_private *lp = netdev_priv(dev); - ewrk3_devs[i] = NULL; - unregister_netdev(dev); - release_region(dev->base_addr, EWRK3_TOTAL_SIZE); - iounmap(lp->shmem); - free_netdev(dev); - } -} - -static __init int ewrk3_init_module(void) -{ - int i=0; - - while( io[i] && irq[i] ) { - struct net_device *dev - = alloc_etherdev(sizeof(struct ewrk3_private)); - - if (!dev) - break; - - if (ewrk3_probe1(dev, io[i], irq[i]) != 0) { - free_netdev(dev); - break; - } - - ewrk3_devs[ndevs++] = dev; - i++; - 
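The MODULE section above uses parallel io[]/irq[] parameter arrays and probes one unit per pair, e.g. "modprobe ewrk3 io=0x300,0x340 irq=5,10". A minimal sketch of that shape with the hardware probe elided; a real driver would fill in netdev_ops, read the MAC address and call register_netdev() inside the loop, and the names sketch_init/sketch_exit are ours:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define MAX_UNITS 4

static int io[MAX_UNITS] = { 0x300, };
static int irq[MAX_UNITS];
static struct net_device *units[MAX_UNITS];
static int nunits;

module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s)");

static int __init sketch_init(void)
{
	int i;

	for (i = 0; i < MAX_UNITS && io[i] && irq[i]; i++) {
		struct net_device *dev = alloc_etherdev(0);

		if (!dev)
			break;
		dev->base_addr = io[i];
		dev->irq = irq[i];
		/* hardware probe + register_netdev() would happen here */
		units[nunits++] = dev;
	}
	return nunits ? 0 : -ENODEV;
}

static void __exit sketch_exit(void)
{
	while (nunits-- > 0)
		free_netdev(units[nunits]);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");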
} - - return ndevs ? 0 : -EIO; -} - - -/* Hack for breakage in new module stuff */ -module_exit(ewrk3_exit_module); -module_init(ewrk3_init_module); -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/dec/ewrk3.h b/drivers/net/ethernet/dec/ewrk3.h deleted file mode 100644 index 8e0ee906567b..000000000000 --- a/drivers/net/ethernet/dec/ewrk3.h +++ /dev/null @@ -1,322 +0,0 @@ -/* - Written 1994 by David C. Davies. - - Copyright 1994 Digital Equipment Corporation. - - This software may be used and distributed according to the terms of the - GNU General Public License, incorporated herein by reference. - - The author may be reached as davies@wanton.lkg.dec.com or Digital - Equipment Corporation, 550 King Street, Littleton MA 01460. - - ========================================================================= -*/ - -/* -** I/O Address Register Map -*/ -#define EWRK3_CSR iobase+0x00 /* Control and Status Register */ -#define EWRK3_CR iobase+0x01 /* Control Register */ -#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */ -#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */ -#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */ -#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */ -#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */ -#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */ -#define EWRK3_RQ iobase+0x08 /* Receive Queue */ -#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */ -#define EWRK3_TQ iobase+0x0a /* Transmit Queue */ -#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */ -#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */ -#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */ -#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */ -#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */ -#define EWRK3_DATA iobase+0x10 /* Data Register */ -#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */ -#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */ -#define EWRK3_MPR iobase+0x13 /* Memory Page Register */ -#define EWRK3_MBR iobase+0x14 /* Memory Base Register */ -#define EWRK3_APROM iobase+0x15 /* Address PROM */ -#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */ -#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */ -#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */ -#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */ -#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */ -#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */ -#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */ -#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */ -#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */ - -/* -** Control Page Map -*/ -#define PAGE0_FMQ 0x000 /* Free Memory Queue */ -#define PAGE0_RQ 0x080 /* Receive Queue */ -#define PAGE0_TQ 0x100 /* Transmit Queue */ -#define PAGE0_TDQ 0x180 /* Transmit Done Queue */ -#define PAGE0_HTE 0x200 /* Hash Table Entries */ -#define PAGE0_RSVD 0x240 /* RESERVED */ -#define PAGE0_USRD 0x600 /* User Data */ - -/* -** Control and Status Register bit definitions (EWRK3_CSR) -*/ -#define CSR_RA 0x80 /* Runt Accept */ -#define CSR_PME 0x40 /* Promiscuous Mode Enable */ -#define CSR_MCE 0x20 /* Multicast Enable */ -#define CSR_TNE 0x08 /* TX Done Queue Not Empty */ -#define CSR_RNE 0x04 /* RX Queue Not Empty */ -#define CSR_TXD 0x02 /* TX Disable */ -#define CSR_RXD 0x01 /* RX Disable */ - -/* -** Control Register bit definitions (EWRK3_CR) -*/ -#define CR_APD 0x80 /* 
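The register map just above defines every register as "iobase + offset", so the macros silently consume a local variable literally named iobase. A minimal illustration of the resulting idiom, assuming EWRK3_CSR, CSR_TXD and CSR_RXD from this header are in scope (the helper name is ours, not the driver's):

#include <linux/netdevice.h>
#include <linux/types.h>
#include <asm/io.h>

static void ewrk3_stop_rxtx(struct net_device *dev)
{
	unsigned long iobase = dev->base_addr;	/* consumed by the macros */
	u8 csr;

	csr = inb(EWRK3_CSR);			/* expands to iobase + 0x00 */
	csr |= CSR_TXD | CSR_RXD;		/* disable transmit and receive */
	outb(csr, EWRK3_CSR);
}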
Auto Port Disable */ -#define CR_PSEL 0x40 /* Port Select (0->TP port) */ -#define CR_LBCK 0x20 /* LoopBaCK enable */ -#define CR_FDUP 0x10 /* Full DUPlex enable */ -#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */ -#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */ -#define CR_LED 0x02 /* LED (1-> turn on) */ - -/* -** Interrupt Control Register bit definitions (EWRK3_ICR) -*/ -#define ICR_IE 0x80 /* Interrupt Enable */ -#define ICR_IS 0x60 /* Interrupt Selected */ -#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */ -#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */ -#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */ -#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */ - -/* -** Transmit Status Register bit definitions (EWRK3_TSR) -*/ -#define TSR_NCL 0x80 /* No Carrier Loopback */ -#define TSR_ID 0x40 /* Initially Deferred */ -#define TSR_LCL 0x20 /* Late CoLlision */ -#define TSR_ECL 0x10 /* Excessive CoLlisions */ -#define TSR_RCNTR 0x0f /* Retries CouNTeR */ - -/* -** I/O Page Register bit definitions (EWRK3_IOPR) -*/ -#define EEPROM_INIT 0xc0 /* EEPROM INIT command */ -#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */ -#define EEPROM_WR 0xd0 /* EEPROM WRITE command */ -#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */ -#define EEPROM_RD 0xe0 /* EEPROM READ command */ - -/* -** I/O Base Register bit definitions (EWRK3_IOBR) -*/ -#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */ -#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */ - -/* -** I/O Configuration/Management Register bit definitions (EWRK3_CMR) -*/ -#define CMR_RA 0x80 /* Read Ahead */ -#define CMR_WB 0x40 /* Write Behind */ -#define CMR_LINK 0x20 /* 0->TP */ -#define CMR_POLARITY 0x10 /* Informational */ -#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */ -#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */ -#define CMR_PNP 0x04 /* Plug 'n Play */ -#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */ -#define CMR_0WS 0x01 /* Zero Wait State */ - -/* -** MAC Receive Status Register bit definitions -*/ - -#define R_ROK 0x80 /* Receive OK summary */ -#define R_IAM 0x10 /* Individual Address Match */ -#define R_MCM 0x08 /* MultiCast Match */ -#define R_DBE 0x04 /* Dribble Bit Error */ -#define R_CRC 0x02 /* CRC error */ -#define R_PLL 0x01 /* Phase Lock Lost */ - -/* -** MAC Transmit Control Register bit definitions -*/ - -#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */ -#define TCR_SED 0x20 /* Stop when Error Detected */ -#define TCR_QMODE 0x10 /* Q_MODE */ -#define TCR_LAB 0x08 /* Less Aggressive Backoff */ -#define TCR_PAD 0x04 /* PAD Runt Packets */ -#define TCR_IFC 0x02 /* Insert Frame Check */ -#define TCR_ISA 0x01 /* Insert Source Address */ - -/* -** MAC Transmit Status Register bit definitions -*/ - -#define T_VSTS 0x80 /* Valid STatuS */ -#define T_CTU 0x40 /* Cut Through Used */ -#define T_SQE 0x20 /* Signal Quality Error */ -#define T_NCL 0x10 /* No Carrier Loopback */ -#define T_LCL 0x08 /* Late Collision */ -#define T_ID 0x04 /* Initially Deferred */ -#define T_COLL 0x03 /* COLLision status */ -#define T_XCOLL 0x03 /* Excessive Collisions */ -#define T_MCOLL 0x02 /* Multiple Collisions */ -#define T_OCOLL 0x01 /* One Collision */ -#define T_NOCOLL 0x00 /* No Collisions */ -#define T_XUR 0x03 /* Excessive Underruns */ -#define T_TXE 0x7f /* TX Errors */ - -/* -** EISA Configuration Register bit definitions -*/ - -#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */ -#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */ 
-#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */ -#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */ -#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */ -#define EISA_CR iobase + 0x0c84 /* EISA Control Register */ - -/* -** EEPROM BYTES -*/ -#define EEPROM_MEMB 0x00 -#define EEPROM_IOB 0x01 -#define EEPROM_EISA_ID0 0x02 -#define EEPROM_EISA_ID1 0x03 -#define EEPROM_EISA_ID2 0x04 -#define EEPROM_EISA_ID3 0x05 -#define EEPROM_MISC0 0x06 -#define EEPROM_MISC1 0x07 -#define EEPROM_PNAME7 0x08 -#define EEPROM_PNAME6 0x09 -#define EEPROM_PNAME5 0x0a -#define EEPROM_PNAME4 0x0b -#define EEPROM_PNAME3 0x0c -#define EEPROM_PNAME2 0x0d -#define EEPROM_PNAME1 0x0e -#define EEPROM_PNAME0 0x0f -#define EEPROM_SWFLAGS 0x10 -#define EEPROM_HWCAT 0x11 -#define EEPROM_NETMAN2 0x12 -#define EEPROM_REVLVL 0x13 -#define EEPROM_NETMAN0 0x14 -#define EEPROM_NETMAN1 0x15 -#define EEPROM_CHIPVER 0x16 -#define EEPROM_SETUP 0x17 -#define EEPROM_PADDR0 0x18 -#define EEPROM_PADDR1 0x19 -#define EEPROM_PADDR2 0x1a -#define EEPROM_PADDR3 0x1b -#define EEPROM_PADDR4 0x1c -#define EEPROM_PADDR5 0x1d -#define EEPROM_PA_CRC 0x1e -#define EEPROM_CHKSUM 0x1f - -/* -** EEPROM bytes for checksumming -*/ -#define EEPROM_MAX 32 /* bytes */ - -/* -** EEPROM MISCELLANEOUS FLAGS -*/ -#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */ -#define READ_AHEAD 0x0080 /* Read Ahead feature */ -#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */ -#define IRQ_SEL 0x0060 /* IRQ line selection */ -#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */ -#define ENA_16 0x0004 /* Enables 16 bit memory transfers */ -#define WRITE_BEHIND 0x0002 /* Write Behind feature */ -#define _0WS_ENA 0x0001 /* Zero Wait State Enable */ - -/* -** EEPROM NETWORK MANAGEMENT FLAGS -*/ -#define NETMAN_POL 0x04 /* Polarity defeat */ -#define NETMAN_LINK 0x02 /* Link defeat */ -#define NETMAN_CCE 0x01 /* Custom Counters Enable */ - -/* -** EEPROM SW FLAGS -*/ -#define SW_SQE 0x10 /* Signal Quality Error */ -#define SW_LAB 0x08 /* Less Aggressive Backoff */ -#define SW_INIT 0x04 /* Initialized */ -#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */ -#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */ - -/* -** EEPROM SETUP FLAGS -*/ -#define SETUP_APD 0x80 /* AutoPort Disable */ -#define SETUP_PS 0x40 /* Port Select */ -#define SETUP_MP 0x20 /* MultiPort */ -#define SETUP_1TP 0x10 /* 1 port, TP */ -#define SETUP_1COAX 0x00 /* 1 port, Coax */ -#define SETUP_DRAM 0x02 /* Number of DRAMS on board */ - -/* -** EEPROM MANAGEMENT FLAGS -*/ -#define MGMT_CCE 0x01 /* Custom Counters Enable */ - -/* -** EEPROM VERSIONS -*/ -#define LeMAC 0x11 -#define LeMAC2 0x12 - -/* -** Miscellaneous -*/ - -#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */ -#define EISA_EN 0x0001 /* Enable EISA bus buffers */ - -#define HASH_TABLE_LEN 512 /* Bits */ - -#define XCT 0x80 /* Transmit Cut Through */ -#define PRELOAD 16 /* 4 long words */ - -#define MASK_INTERRUPTS 1 -#define UNMASK_INTERRUPTS 0 - -#define EEPROM_OFFSET(a) ((u_short)((u_long)(a))) - -/* -** Include the IOCTL stuff -*/ -#include <linux/sockios.h> - -#define EWRK3IOCTL SIOCDEVPRIVATE - -struct ewrk3_ioctl { - unsigned short cmd; /* Command to run */ - unsigned short len; /* Length of the data buffer */ - unsigned char __user *data; /* Pointer to the data buffer */ -}; - -/* -** Recognised commands for the driver -*/ -#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */ -#define EWRK3_SET_HWADDR 0x02 /* Get the hardware address */ -#define EWRK3_SET_PROM 0x03 /* Set 
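The struct ewrk3_ioctl above is laid directly into the ifreq union and sent with SIOCDEVPRIVATE (which EWRK3IOCTL aliases). A user-space sketch of issuing command 0x01, EWRK3_GET_HWADDR; the interface name "eth0" and the locally repeated struct definition are illustrative, mirroring the header rather than including it:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

struct ewrk3_ioctl {
	unsigned short cmd;
	unsigned short len;
	unsigned char *data;
};

int main(void)
{
	unsigned char hwaddr[6];
	struct ewrk3_ioctl req = { .cmd = 0x01, .len = 0, .data = hwaddr };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	memcpy(&ifr.ifr_ifru, &req, sizeof(req));	/* overlay the union */

	if (ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
		printf("MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
		       hwaddr[0], hwaddr[1], hwaddr[2],
		       hwaddr[3], hwaddr[4], hwaddr[5]);
	close(fd);
	return 0;
}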
Promiscuous Mode */ -#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */ -#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */ -#define EWRK3_GET_MCA 0x06 /* Get a multicast address */ -#define EWRK3_SET_MCA 0x07 /* Set a multicast address */ -#define EWRK3_CLR_MCA 0x08 /* Clear a multicast address */ -#define EWRK3_MCA_EN 0x09 /* Enable a multicast address group */ -#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */ -#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */ -#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */ -#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */ -#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */ -#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */ -#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */ -#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */ -#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */ diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig index b5afe218c31b..ee26ce78e270 100644 --- a/drivers/net/ethernet/dlink/Kconfig +++ b/drivers/net/ethernet/dlink/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_DLINK bool "D-Link devices" default y - depends on PCI || PARPORT + depends on PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -18,36 +18,6 @@ config NET_VENDOR_DLINK if NET_VENDOR_DLINK -config DE600 - tristate "D-Link DE600 pocket adapter support" - depends on PARPORT - ---help--- - This is a network (Ethernet) device which attaches to your parallel - port. Read <file:Documentation/networking/DLINK.txt> as well as the - Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>, if you want to use - this. It is possible to have several devices share a single parallel - port and it is safe to compile the corresponding drivers into the - kernel. - - To compile this driver as a module, choose M here: the module - will be called de600. - -config DE620 - tristate "D-Link DE620 pocket adapter support" - depends on PARPORT - ---help--- - This is a network (Ethernet) device which attaches to your parallel - port. Read <file:Documentation/networking/DLINK.txt> as well as the - Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>, if you want to use - this. It is possible to have several devices share a single parallel - port and it is safe to compile the corresponding drivers into the - kernel. - - To compile this driver as a module, choose M here: the module - will be called de620. - config DL2K tristate "DL2000/TC902x-based Gigabit Ethernet support" depends on PCI diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile index c705eaa4f5b2..40085f67157b 100644 --- a/drivers/net/ethernet/dlink/Makefile +++ b/drivers/net/ethernet/dlink/Makefile @@ -2,7 +2,5 @@ # Makefile for the D-Link network device drivers. # -obj-$(CONFIG_DE600) += de600.o -obj-$(CONFIG_DE620) += de620.o obj-$(CONFIG_DL2K) += dl2k.o obj-$(CONFIG_SUNDANCE) += sundance.o diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c deleted file mode 100644 index 414f0eea1049..000000000000 --- a/drivers/net/ethernet/dlink/de600.c +++ /dev/null @@ -1,529 +0,0 @@ -static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n"; -/* - * de600.c - * - * Linux driver for the D-Link DE-600 Ethernet pocket adapter. 
- * - * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall - * The Author may be reached as bj0rn@blox.se - * - * Based on adapter information gathered from DE600.ASM by D-Link Inc., - * as included on disk C in the v.2.11 of PC/TCP from FTP Software. - * For DE600.asm: - * Portions (C) Copyright 1990 D-Link, Inc. - * Copyright, 1988-1992, Russell Nelson, Crynwr Software - * - * Adapted to the sample network driver core for linux, - * written by: Donald Becker <becker@super.org> - * (Now at <becker@scyld.com>) - * - **************************************************************/ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - **************************************************************/ - -/* Add more time here if your adapter won't work OK: */ -#define DE600_SLOW_DOWN udelay(delay_time) - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/string.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/inet.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> - -#include <asm/io.h> - -#include "de600.h" - -static bool check_lost = true; -module_param(check_lost, bool, 0); -MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600"); - -static unsigned int delay_time = 10; -module_param(delay_time, int, 0); -MODULE_PARM_DESC(delay_time, "DE-600 deley on I/O in microseconds"); - - -/* - * D-Link driver variables: - */ - -static volatile int rx_page; - -#define TX_PAGES 2 -static volatile int tx_fifo[TX_PAGES]; -static volatile int tx_fifo_in; -static volatile int tx_fifo_out; -static volatile int free_tx_pages = TX_PAGES; -static int was_down; -static DEFINE_SPINLOCK(de600_lock); - -static inline u8 de600_read_status(struct net_device *dev) -{ - u8 status; - - outb_p(STATUS, DATA_PORT); - status = inb(STATUS_PORT); - outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT); - - return status; -} - -static inline u8 de600_read_byte(unsigned char type, struct net_device *dev) -{ - /* dev used by macros */ - u8 lo; - outb_p((type), DATA_PORT); - lo = ((unsigned char)inb(STATUS_PORT)) >> 4; - outb_p((type) | HI_NIBBLE, DATA_PORT); - return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo; -} - -/* - * Open/initialize the board. This is called (in the current kernel) - * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1). - * - * This routine should set everything up anew at each open, even - * registers that "should" only need to be set once at boot, so that - * there is a non-reboot way to recover if something goes wrong. 
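de600_read_byte() above assembles each byte from two 4-bit reads of the printer STATUS port, which always carries data in its top nibble: the first sample supplies the low nibble (shifted down), the second the high nibble (left in place). A standalone sketch of just that arithmetic, with made-up sample values:

#include <stdio.h>
#include <stdint.h>

static uint8_t combine_nibbles(uint8_t first_status, uint8_t second_status)
{
	uint8_t lo = first_status >> 4;		/* low nibble, shifted down */

	return (second_status & 0xf0) | lo;	/* high nibble stays in place */
}

int main(void)
{
	/* pretend the two status reads returned 0xA7 and 0x53 */
	printf("reassembled byte: 0x%02x\n", combine_nibbles(0xA7, 0x53));
	/* prints 0x5a: high nibble from the 2nd read, low nibble from the 1st */
	return 0;
}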
- */ - -static int de600_open(struct net_device *dev) -{ - unsigned long flags; - int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev); - if (ret) { - printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ); - return ret; - } - spin_lock_irqsave(&de600_lock, flags); - ret = adapter_init(dev); - spin_unlock_irqrestore(&de600_lock, flags); - return ret; -} - -/* - * The inverse routine to de600_open(). - */ - -static int de600_close(struct net_device *dev) -{ - select_nic(); - rx_page = 0; - de600_put_command(RESET); - de600_put_command(STOP_RESET); - de600_put_command(0); - select_prn(); - free_irq(DE600_IRQ, dev); - return 0; -} - -static inline void trigger_interrupt(struct net_device *dev) -{ - de600_put_command(FLIP_IRQ); - select_prn(); - DE600_SLOW_DOWN; - select_nic(); - de600_put_command(0); -} - -/* - * Copy a buffer to the adapter transmit page memory. - * Start sending. - */ - -static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - unsigned long flags; - int transmit_from; - int len; - int tickssofar; - u8 *buffer = skb->data; - int i; - - if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */ - tickssofar = jiffies - dev_trans_start(dev); - if (tickssofar < HZ/20) - return NETDEV_TX_BUSY; - /* else */ - printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem"); - /* Restart the adapter. */ - spin_lock_irqsave(&de600_lock, flags); - if (adapter_init(dev)) { - spin_unlock_irqrestore(&de600_lock, flags); - return NETDEV_TX_BUSY; - } - spin_unlock_irqrestore(&de600_lock, flags); - } - - /* Start real output */ - pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages); - - if ((len = skb->len) < RUNT) - len = RUNT; - - spin_lock_irqsave(&de600_lock, flags); - select_nic(); - tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len; - tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */ - - if(check_lost) - { - /* This costs about 40 instructions per packet... */ - de600_setup_address(NODE_ADDRESS, RW_ADDR); - de600_read_byte(READ_DATA, dev); - if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) { - if (adapter_init(dev)) { - spin_unlock_irqrestore(&de600_lock, flags); - return NETDEV_TX_BUSY; - } - } - } - - de600_setup_address(transmit_from, RW_ADDR); - for (i = 0; i < skb->len ; ++i, ++buffer) - de600_put_byte(*buffer); - for (; i < len; ++i) - de600_put_byte(0); - - if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */ - dev->trans_start = jiffies; - netif_start_queue(dev); /* allow more packets into adapter */ - /* Send page and generate a faked interrupt */ - de600_setup_address(transmit_from, TX_ADDR); - de600_put_command(TX_ENABLE); - } - else { - if (free_tx_pages) - netif_start_queue(dev); - else - netif_stop_queue(dev); - select_prn(); - } - spin_unlock_irqrestore(&de600_lock, flags); - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* - * The typical workload of the driver: - * Handle the network interface interrupts. - */ - -static irqreturn_t de600_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - u8 irq_status; - int retrig = 0; - int boguscount = 0; - - spin_lock(&de600_lock); - - select_nic(); - irq_status = de600_read_status(dev); - - do { - pr_debug("de600_interrupt (%02X)\n", irq_status); - - if (irq_status & RX_GOOD) - de600_rx_intr(dev); - else if (!(irq_status & RX_BUSY)) - de600_put_command(RX_ENABLE); - - /* Any transmission in progress? 
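The top of de600_start_xmit() above implements a small stall check: when no transmit page is free, the packet is only bounced back as NETDEV_TX_BUSY while the last transmit start is younger than HZ/20 (50 ms); anything older is treated as a wedged adapter and triggers a re-init. A sketch of that predicate in isolation (the helper name and the free_pages parameter are ours):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

static bool tx_looks_wedged(struct net_device *dev, int free_pages)
{
	unsigned long age;

	if (free_pages > 0)
		return false;			/* still room, nothing to do */

	age = jiffies - dev_trans_start(dev);	/* ticks since the last start */
	return age >= HZ / 20;			/* older than 50 ms: stuck */
}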
*/ - if (free_tx_pages < TX_PAGES) - retrig = de600_tx_intr(dev, irq_status); - else - retrig = 0; - - irq_status = de600_read_status(dev); - } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) ); - /* - * Yeah, it _looks_ like busy waiting, smells like busy waiting - * and I know it's not PC, but please, it will only occur once - * in a while and then only for a loop or so (< 1ms for sure!) - */ - - /* Enable adapter interrupts */ - select_prn(); - if (retrig) - trigger_interrupt(dev); - spin_unlock(&de600_lock); - return IRQ_HANDLED; -} - -static int de600_tx_intr(struct net_device *dev, int irq_status) -{ - /* - * Returns 1 if tx still not done - */ - - /* Check if current transmission is done yet */ - if (irq_status & TX_BUSY) - return 1; /* tx not done, try again */ - - /* else */ - /* If last transmission OK then bump fifo index */ - if (!(irq_status & TX_FAILED16)) { - tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES; - ++free_tx_pages; - dev->stats.tx_packets++; - netif_wake_queue(dev); - } - - /* More to send, or resend last packet? */ - if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) { - dev->trans_start = jiffies; - de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR); - de600_put_command(TX_ENABLE); - return 1; - } - /* else */ - - return 0; -} - -/* - * We have a good packet, get it out of the adapter. - */ -static void de600_rx_intr(struct net_device *dev) -{ - struct sk_buff *skb; - int i; - int read_from; - int size; - unsigned char *buffer; - - /* Get size of received packet */ - size = de600_read_byte(RX_LEN, dev); /* low byte */ - size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */ - size -= 4; /* Ignore trailing 4 CRC-bytes */ - - /* Tell adapter where to store next incoming packet, enable receiver */ - read_from = rx_page_adr(); - next_rx_page(); - de600_put_command(RX_ENABLE); - - if ((size < 32) || (size > 1535)) { - printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size); - if (size > 10000) - adapter_init(dev); - return; - } - - skb = netdev_alloc_skb(dev, size + 2); - if (skb == NULL) { - printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); - return; - } - /* else */ - - skb_reserve(skb,2); /* Align */ - - /* 'skb->data' points to the start of sk_buff data area. */ - buffer = skb_put(skb,size); - - /* copy the packet into the buffer */ - de600_setup_address(read_from, RW_ADDR); - for (i = size; i > 0; --i, ++buffer) - *buffer = de600_read_byte(READ_DATA, dev); - - skb->protocol=eth_type_trans(skb,dev); - - netif_rx(skb); - - /* update stats */ - dev->stats.rx_packets++; /* count all receives */ - dev->stats.rx_bytes += size; /* count all received bytes */ - - /* - * If any worth-while packets have been received, netif_rx() - * will work on them when we get to the tasklets. - */ -} - -static const struct net_device_ops de600_netdev_ops = { - .ndo_open = de600_open, - .ndo_stop = de600_close, - .ndo_start_xmit = de600_start_xmit, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - - -static struct net_device * __init de600_probe(void) -{ - int i; - struct net_device *dev; - int err; - - dev = alloc_etherdev(0); - if (!dev) - return ERR_PTR(-ENOMEM); - - - if (!request_region(DE600_IO, 3, "de600")) { - printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO); - err = -EBUSY; - goto out; - } - - printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name); - /* Alpha testers must have the version number to report bugs. 
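de600_rx_intr() above reads the frame length as two byte-wide accesses (low byte first), strips the 4 trailing CRC bytes, and rejects implausible sizes before allocating a buffer. A standalone sketch of that length handling, with invented sample bytes:

#include <stdio.h>

static int rx_frame_size(unsigned char len_lo, unsigned char len_hi)
{
	int size = len_lo | (len_hi << 8);

	size -= 4;			/* discard the trailing CRC */
	if (size < 32 || size > 1535)	/* runt or oversized: bogus */
		return -1;
	return size;
}

int main(void)
{
	printf("%d\n", rx_frame_size(0x40, 0x00));	/* 0x0040 - 4 = 60 */
	printf("%d\n", rx_frame_size(0xff, 0x7f));	/* 32763: rejected, -1 */
	return 0;
}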
*/ - pr_debug("%s", version); - - /* probe for adapter */ - err = -ENODEV; - rx_page = 0; - select_nic(); - (void)de600_read_status(dev); - de600_put_command(RESET); - de600_put_command(STOP_RESET); - if (de600_read_status(dev) & 0xf0) { - printk(": not at I/O %#3x.\n", DATA_PORT); - goto out1; - } - - /* - * Maybe we found one, - * have to check if it is a D-Link DE-600 adapter... - */ - - /* Get the adapter ethernet address from the ROM */ - de600_setup_address(NODE_ADDRESS, RW_ADDR); - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = de600_read_byte(READ_DATA, dev); - dev->broadcast[i] = 0xff; - } - - /* Check magic code */ - if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) { - /* OK, install real address */ - dev->dev_addr[0] = 0x00; - dev->dev_addr[1] = 0x80; - dev->dev_addr[2] = 0xc8; - dev->dev_addr[3] &= 0x0f; - dev->dev_addr[3] |= 0x70; - } else { - printk(" not identified in the printer port\n"); - goto out1; - } - - printk(", Ethernet Address: %pM\n", dev->dev_addr); - - dev->netdev_ops = &de600_netdev_ops; - - dev->flags&=~IFF_MULTICAST; - - select_prn(); - - err = register_netdev(dev); - if (err) - goto out1; - - return dev; - -out1: - release_region(DE600_IO, 3); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static int adapter_init(struct net_device *dev) -{ - int i; - - select_nic(); - rx_page = 0; /* used by RESET */ - de600_put_command(RESET); - de600_put_command(STOP_RESET); - - /* Check if it is still there... */ - /* Get the some bytes of the adapter ethernet address from the ROM */ - de600_setup_address(NODE_ADDRESS, RW_ADDR); - de600_read_byte(READ_DATA, dev); - if ((de600_read_byte(READ_DATA, dev) != 0xde) || - (de600_read_byte(READ_DATA, dev) != 0x15)) { - /* was: if (de600_read_status(dev) & 0xf0) { */ - printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n"); - /* Goodbye, cruel world... */ - dev->flags &= ~IFF_UP; - de600_close(dev); - was_down = 1; - netif_stop_queue(dev); /* Transmit busy... */ - return 1; /* failed */ - } - - if (was_down) { - printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name); - was_down = 0; - } - - tx_fifo_in = 0; - tx_fifo_out = 0; - free_tx_pages = TX_PAGES; - - - /* set the ether address. 
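The probe above recognises a DE-600 by the 0xde 0x15 magic in bytes 1-2 of the address ROM, then substitutes D-Link's real OUI (00:80:c8) and remaps byte 3 into the 0x70-0x7f range. A standalone sketch of that check and fix-up; the ROM contents below are invented for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char rom[6] = { 0x12, 0xde, 0x15, 0x2a, 0x34, 0x56 };
	unsigned char mac[6];

	memcpy(mac, rom, 6);
	if (mac[1] != 0xde || mac[2] != 0x15) {
		puts("no DE-600 signature found");
		return 1;
	}

	mac[0] = 0x00;			/* install the real D-Link OUI */
	mac[1] = 0x80;
	mac[2] = 0xc8;
	mac[3] = (mac[3] & 0x0f) | 0x70;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}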
*/ - de600_setup_address(NODE_ADDRESS, RW_ADDR); - for (i = 0; i < ETH_ALEN; i++) - de600_put_byte(dev->dev_addr[i]); - - /* where to start saving incoming packets */ - rx_page = RX_BP | RX_BASE_PAGE; - de600_setup_address(MEM_4K, RW_ADDR); - /* Enable receiver */ - de600_put_command(RX_ENABLE); - select_prn(); - - netif_start_queue(dev); - - return 0; /* OK */ -} - -static struct net_device *de600_dev; - -static int __init de600_init(void) -{ - de600_dev = de600_probe(); - if (IS_ERR(de600_dev)) - return PTR_ERR(de600_dev); - return 0; -} - -static void __exit de600_exit(void) -{ - unregister_netdev(de600_dev); - release_region(DE600_IO, 3); - free_netdev(de600_dev); -} - -module_init(de600_init); -module_exit(de600_exit); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/dlink/de600.h b/drivers/net/ethernet/dlink/de600.h deleted file mode 100644 index e80ecbabcf4e..000000000000 --- a/drivers/net/ethernet/dlink/de600.h +++ /dev/null @@ -1,168 +0,0 @@ -/************************************************** - * * - * Definition of D-Link Ethernet Pocket adapter * - * * - **************************************************/ -/* - * D-Link Ethernet pocket adapter ports - */ -/* - * OK, so I'm cheating, but there are an awful lot of - * reads and writes in order to get anything in and out - * of the DE-600 with 4 bits at a time in the parallel port, - * so every saved instruction really helps :-) - */ - -#ifndef DE600_IO -#define DE600_IO 0x378 -#endif - -#define DATA_PORT (DE600_IO) -#define STATUS_PORT (DE600_IO + 1) -#define COMMAND_PORT (DE600_IO + 2) - -#ifndef DE600_IRQ -#define DE600_IRQ 7 -#endif -/* - * It really should look like this, and autoprobing as well... - * -#define DATA_PORT (dev->base_addr + 0) -#define STATUS_PORT (dev->base_addr + 1) -#define COMMAND_PORT (dev->base_addr + 2) -#define DE600_IRQ dev->irq - */ - -/* - * D-Link COMMAND_PORT commands - */ -#define SELECT_NIC 0x04 /* select Network Interface Card */ -#define SELECT_PRN 0x1c /* select Printer */ -#define NML_PRN 0xec /* normal Printer situation */ -#define IRQEN 0x10 /* enable IRQ line */ - -/* - * D-Link STATUS_PORT - */ -#define RX_BUSY 0x80 -#define RX_GOOD 0x40 -#define TX_FAILED16 0x10 -#define TX_BUSY 0x08 - -/* - * D-Link DATA_PORT commands - * command in low 4 bits - * data in high 4 bits - * select current data nibble with HI_NIBBLE bit - */ -#define WRITE_DATA 0x00 /* write memory */ -#define READ_DATA 0x01 /* read memory */ -#define STATUS 0x02 /* read status register */ -#define COMMAND 0x03 /* write command register (see COMMAND below) */ -#define NULL_COMMAND 0x04 /* null command */ -#define RX_LEN 0x05 /* read received packet length */ -#define TX_ADDR 0x06 /* set adapter transmit memory address */ -#define RW_ADDR 0x07 /* set adapter read/write memory address */ -#define HI_NIBBLE 0x08 /* read/write the high nibble of data, - or-ed with rest of command */ - -/* - * command register, accessed through DATA_PORT with low bits = COMMAND - */ -#define RX_ALL 0x01 /* PROMISCUOUS */ -#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */ -#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */ - -#define TX_ENABLE 0x04 /* bit 2 */ -#define RX_ENABLE 0x08 /* bit 3 */ - -#define RESET 0x80 /* set bit 7 high */ -#define STOP_RESET 0x00 /* set bit 7 low */ - -/* - * data to command register - * (high 4 bits in write to DATA_PORT) - */ -#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */ -#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */ -#define 
FLIP_IRQ 0x40 /* bit 6 */ - -/* - * D-Link adapter internal memory: - * - * 0-2K 1:st transmit page (send from pointer up to 2K) - * 2-4K 2:nd transmit page (send from pointer up to 4K) - * - * 4-6K 1:st receive page (data from 4K upwards) - * 6-8K 2:nd receive page (data from 6K upwards) - * - * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address) - */ -#define MEM_2K 0x0800 /* 2048 */ -#define MEM_4K 0x1000 /* 4096 */ -#define MEM_6K 0x1800 /* 6144 */ -#define NODE_ADDRESS 0x2000 /* 8192 */ - -#define RUNT 60 /* Too small Ethernet packet */ - -/************************************************** - * * - * End of definition * - * * - **************************************************/ - -/* - * Index to functions, as function prototypes. - */ -/* Routines used internally. (See "convenience macros") */ -static u8 de600_read_status(struct net_device *dev); -static u8 de600_read_byte(unsigned char type, struct net_device *dev); - -/* Put in the device structure. */ -static int de600_open(struct net_device *dev); -static int de600_close(struct net_device *dev); -static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev); - -/* Dispatch from interrupts. */ -static irqreturn_t de600_interrupt(int irq, void *dev_id); -static int de600_tx_intr(struct net_device *dev, int irq_status); -static void de600_rx_intr(struct net_device *dev); - -/* Initialization */ -static void trigger_interrupt(struct net_device *dev); -static int adapter_init(struct net_device *dev); - -/* - * Convenience macros/functions for D-Link adapter - */ - -#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN -#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN - -/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */ -#define de600_put_byte(data) ( \ - outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \ - outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT)) - -/* - * The first two outb_p()'s below could perhaps be deleted if there - * would be more delay in the last two. Not certain about it yet... - */ -#define de600_put_command(cmd) ( \ - outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \ - outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \ - outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \ - outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT)) - -#define de600_setup_address(addr,type) ( \ - outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \ - outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \ - outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \ - outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT)) - -#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K)) - -/* Flip bit, only 2 pages */ -#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT) - -#define tx_page_adr(a) (((a) + 1) * MEM_2K) diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c deleted file mode 100644 index 2e2bc60ee811..000000000000 --- a/drivers/net/ethernet/dlink/de620.c +++ /dev/null @@ -1,987 +0,0 @@ -/* - * de620.c $Revision: 1.40 $ BETA - * - * - * Linux driver for the D-Link DE-620 Ethernet pocket adapter. - * - * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se> - * - * Based on adapter information gathered from DOS packetdriver - * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.) - * Portions (C) Copyright D-Link SYSTEM Inc. 
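The de600_setup_address() macro above pushes a 16-bit adapter address over the 4-bit printer interface as four writes, each carrying one address nibble in the upper half of the byte and the command code plus the HI_NIBBLE flag in the lower half. A standalone expansion of that encoding that just prints the four bytes which would go to DATA_PORT; the address value is an example (NODE_ADDRESS):

#include <stdio.h>

#define RW_ADDR   0x07
#define HI_NIBBLE 0x08

int main(void)
{
	unsigned int addr = 0x2000;	/* NODE_ADDRESS in the adapter memory map */
	unsigned char w[4];

	w[0] = ((addr << 4) & 0xf0) | RW_ADDR;			/* bits 3..0   */
	w[1] = (addr & 0xf0) | RW_ADDR | HI_NIBBLE;		/* bits 7..4   */
	w[2] = ((addr >> 4) & 0xf0) | RW_ADDR;			/* bits 11..8  */
	w[3] = ((addr >> 8) & 0xf0) | RW_ADDR | HI_NIBBLE;	/* bits 15..12 */

	printf("writes to DATA_PORT: %02x %02x %02x %02x\n",
	       w[0], w[1], w[2], w[3]);
	return 0;
}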
1991, 1992 - * Copyright, 1988, Russell Nelson, Crynwr Software - * - * Adapted to the sample network driver core for linux, - * written by: Donald Becker <becker@super.org> - * (Now at <becker@scyld.com>) - * - * Valuable assistance from: - * J. Joshua Kopper <kopper@rtsg.mot.com> - * Olav Kvittem <Olav.Kvittem@uninett.no> - * Germano Caronni <caronni@nessie.cs.id.ethz.ch> - * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au> - * - *****************************************************************************/ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - *****************************************************************************/ -static const char version[] = - "de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n"; - -/*********************************************************************** - * - * "Tuning" section. - * - * Compile-time options: (see below for descriptions) - * -DDE620_IO=0x378 (lpt1) - * -DDE620_IRQ=7 (lpt1) - * -DSHUTDOWN_WHEN_LOST - * -DCOUNT_LOOPS - * -DLOWSPEED - * -DREAD_DELAY - * -DWRITE_DELAY - */ - -/* - * This driver assumes that the printer port is a "normal", - * dumb, uni-directional port! - * If your port is "fancy" in any way, please try to set it to "normal" - * with your BIOS setup. I have no access to machines with bi-directional - * ports, so I can't test such a driver :-( - * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...) - * - * There are some clones of DE620 out there, with different names. - * If the current driver does not recognize a clone, try to change - * the following #define to: - * - * #define DE620_CLONE 1 - */ -#define DE620_CLONE 0 - -/* - * If the adapter has problems with high speeds, enable this #define - * otherwise full printerport speed will be attempted. - * - * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED - * -#define LOWSPEED - */ - -#ifndef READ_DELAY -#define READ_DELAY 100 /* adapter internal read delay in 100ns units */ -#endif - -#ifndef WRITE_DELAY -#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */ -#endif - -/* - * Enable this #define if you want the adapter to do a "ifconfig down" on - * itself when we have detected that something is possibly wrong with it. - * The default behaviour is to retry with "adapter_init()" until success. - * This should be used for debugging purposes only. - * -#define SHUTDOWN_WHEN_LOST - */ - -#ifdef LOWSPEED -/* - * Enable this #define if you want to see debugging output that show how long - * we have to wait before the DE-620 is ready for the next read/write/command. 
- * -#define COUNT_LOOPS - */ -#endif - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/string.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/inet.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> - -#include <asm/io.h> - -/* Constant definitions for the DE-620 registers, commands and bits */ -#include "de620.h" - -typedef unsigned char byte; - -/******************************************************* - * * - * Definition of D-Link DE-620 Ethernet Pocket adapter * - * See also "de620.h" * - * * - *******************************************************/ -#ifndef DE620_IO /* Compile-time configurable */ -#define DE620_IO 0x378 -#endif - -#ifndef DE620_IRQ /* Compile-time configurable */ -#define DE620_IRQ 7 -#endif - -#define DATA_PORT (dev->base_addr) -#define STATUS_PORT (dev->base_addr + 1) -#define COMMAND_PORT (dev->base_addr + 2) - -#define RUNT 60 /* Too small Ethernet packet */ -#define GIANT 1514 /* largest legal size packet, no fcs */ - -/* - * Force media with insmod: - * insmod de620.o bnc=1 - * or - * insmod de620.o utp=1 - * - * Force io and/or irq with insmod: - * insmod de620.o io=0x378 irq=7 - * - * Make a clone skip the Ethernet-address range check: - * insmod de620.o clone=1 - */ -static int bnc; -static int utp; -static int io = DE620_IO; -static int irq = DE620_IRQ; -static int clone = DE620_CLONE; - -static spinlock_t de620_lock; - -module_param(bnc, int, 0); -module_param(utp, int, 0); -module_param(io, int, 0); -module_param(irq, int, 0); -module_param(clone, int, 0); -MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)"); -MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)"); -MODULE_PARM_DESC(io, "DE-620 I/O base address,required"); -MODULE_PARM_DESC(irq, "DE-620 IRQ number,required"); -MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)"); - -/*********************************************** - * * - * Index to functions, as function prototypes. * - * * - ***********************************************/ - -/* - * Routines used internally. (See also "convenience macros.. below") - */ - -/* Put in the device structure. */ -static int de620_open(struct net_device *); -static int de620_close(struct net_device *); -static void de620_set_multicast_list(struct net_device *); -static int de620_start_xmit(struct sk_buff *, struct net_device *); - -/* Dispatch from interrupts. 
*/ -static irqreturn_t de620_interrupt(int, void *); -static int de620_rx_intr(struct net_device *); - -/* Initialization */ -static int adapter_init(struct net_device *); -static int read_eeprom(struct net_device *); - - -/* - * D-Link driver variables: - */ -#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX -#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */ -#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */ -#define DEF_NIC_CMD IRQEN | ICEN | DS1 - -static volatile byte NIC_Cmd; -static volatile byte next_rx_page; -static byte first_rx_page; -static byte last_rx_page; -static byte EIPRegister; - -static struct nic { - byte NodeID[6]; - byte RAM_Size; - byte Model; - byte Media; - byte SCR; -} nic_data; - -/********************************************************** - * * - * Convenience macros/functions for D-Link DE-620 adapter * - * * - **********************************************************/ -#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1)) -#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT); - -/* Check for ready-status, and return a nibble (high 4 bits) for data input */ -#ifdef COUNT_LOOPS -static int tot_cnt; -#endif -static inline byte -de620_ready(struct net_device *dev) -{ - byte value; - register short int cnt = 0; - - while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000)) - ++cnt; - -#ifdef COUNT_LOOPS - tot_cnt += cnt; -#endif - return value & 0xf0; /* nibble */ -} - -static inline void -de620_send_command(struct net_device *dev, byte cmd) -{ - de620_ready(dev); - if (cmd == W_DUMMY) - outb(NIC_Cmd, COMMAND_PORT); - - outb(cmd, DATA_PORT); - - outb(NIC_Cmd ^ CS0, COMMAND_PORT); - de620_ready(dev); - outb(NIC_Cmd, COMMAND_PORT); -} - -static inline void -de620_put_byte(struct net_device *dev, byte value) -{ - /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */ - de620_ready(dev); - outb(value, DATA_PORT); - de620_flip_ds(dev); -} - -static inline byte -de620_read_byte(struct net_device *dev) -{ - byte value; - - /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */ - value = de620_ready(dev); /* High nibble */ - de620_flip_ds(dev); - value |= de620_ready(dev) >> 4; /* Low nibble */ - return value; -} - -static inline void -de620_write_block(struct net_device *dev, byte *buffer, int count, int pad) -{ -#ifndef LOWSPEED - byte uflip = NIC_Cmd ^ (DS0 | DS1); - byte dflip = NIC_Cmd; -#else /* LOWSPEED */ -#ifdef COUNT_LOOPS - int bytes = count; -#endif /* COUNT_LOOPS */ -#endif /* LOWSPEED */ - -#ifdef LOWSPEED -#ifdef COUNT_LOOPS - tot_cnt = 0; -#endif /* COUNT_LOOPS */ - /* No further optimization useful, the limit is in the adapter. 
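de620_ready() above is a bounded busy-wait: spin on the printer status port until the adapter raises its READY handshake bit, but give up after a fixed number of reads so a missing or hung adapter cannot stall the machine, and return the top nibble where the data travels. A sketch of that shape with the port and bit passed in as parameters (the original hard-codes STATUS_PORT and READY from de620.h):

#include <linux/types.h>
#include <asm/io.h>

static u8 bounded_poll_high_nibble(unsigned long status_port, u8 ready_bit)
{
	u8 value;
	int cnt = 0;

	do {
		value = inb(status_port);
	} while (!(value & ready_bit) && ++cnt <= 1000);

	return value & 0xf0;		/* data travels in the top nibble */
}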
*/ - for ( ; count > 0; --count, ++buffer) { - de620_put_byte(dev,*buffer); - } - for ( count = pad ; count > 0; --count, ++buffer) { - de620_put_byte(dev, 0); - } - de620_send_command(dev,W_DUMMY); -#ifdef COUNT_LOOPS - /* trial debug output: loops per byte in de620_ready() */ - printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1))); -#endif /* COUNT_LOOPS */ -#else /* not LOWSPEED */ - for ( ; count > 0; count -=2) { - outb(*buffer++, DATA_PORT); - outb(uflip, COMMAND_PORT); - outb(*buffer++, DATA_PORT); - outb(dflip, COMMAND_PORT); - } - de620_send_command(dev,W_DUMMY); -#endif /* LOWSPEED */ -} - -static inline void -de620_read_block(struct net_device *dev, byte *data, int count) -{ -#ifndef LOWSPEED - byte value; - byte uflip = NIC_Cmd ^ (DS0 | DS1); - byte dflip = NIC_Cmd; -#else /* LOWSPEED */ -#ifdef COUNT_LOOPS - int bytes = count; - - tot_cnt = 0; -#endif /* COUNT_LOOPS */ -#endif /* LOWSPEED */ - -#ifdef LOWSPEED - /* No further optimization useful, the limit is in the adapter. */ - while (count-- > 0) { - *data++ = de620_read_byte(dev); - de620_flip_ds(dev); - } -#ifdef COUNT_LOOPS - /* trial debug output: loops per byte in de620_ready() */ - printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1))); -#endif /* COUNT_LOOPS */ -#else /* not LOWSPEED */ - while (count-- > 0) { - value = inb(STATUS_PORT) & 0xf0; /* High nibble */ - outb(uflip, COMMAND_PORT); - *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */ - outb(dflip , COMMAND_PORT); - } -#endif /* LOWSPEED */ -} - -static inline void -de620_set_delay(struct net_device *dev) -{ - de620_ready(dev); - outb(W_DFR, DATA_PORT); - outb(NIC_Cmd ^ CS0, COMMAND_PORT); - - de620_ready(dev); -#ifdef LOWSPEED - outb(WRITE_DELAY, DATA_PORT); -#else - outb(0, DATA_PORT); -#endif - de620_flip_ds(dev); - - de620_ready(dev); -#ifdef LOWSPEED - outb(READ_DELAY, DATA_PORT); -#else - outb(0, DATA_PORT); -#endif - de620_flip_ds(dev); -} - -static inline void -de620_set_register(struct net_device *dev, byte reg, byte value) -{ - de620_ready(dev); - outb(reg, DATA_PORT); - outb(NIC_Cmd ^ CS0, COMMAND_PORT); - - de620_put_byte(dev, value); -} - -static inline byte -de620_get_register(struct net_device *dev, byte reg) -{ - byte value; - - de620_send_command(dev,reg); - value = de620_read_byte(dev); - de620_send_command(dev,W_DUMMY); - - return value; -} - -/********************************************************************* - * - * Open/initialize the board. - * - * This routine should set everything up anew at each open, even - * registers that "should" only need to be set once at boot, so that - * there is a non-reboot way to recover if something goes wrong. - * - */ -static int de620_open(struct net_device *dev) -{ - int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev); - if (ret) { - printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq); - return ret; - } - - if (adapter_init(dev)) { - ret = -EIO; - goto out_free_irq; - } - - netif_start_queue(dev); - return 0; - -out_free_irq: - free_irq(dev->irq, dev); - return ret; -} - -/************************************************ - * - * The inverse routine to de620_open(). - * - */ - -static int de620_close(struct net_device *dev) -{ - netif_stop_queue(dev); - /* disable recv */ - de620_set_register(dev, W_TCR, RXOFF); - free_irq(dev->irq, dev); - return 0; -} - -/********************************************* - * - * Set or clear the multicast filter for this adaptor. - * (no real multicast implemented for the DE-620, but she can be promiscuous...) 
- * - */ - -static void de620_set_multicast_list(struct net_device *dev) -{ - if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) - { /* Enable promiscuous mode */ - de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); - } - else - { /* Disable promiscuous mode, use normal mode */ - de620_set_register(dev, W_TCR, TCR_DEF); - } -} - -/******************************************************* - * - * Handle timeouts on transmit - */ - -static void de620_timeout(struct net_device *dev) -{ - printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); - /* Restart the adapter. */ - if (!adapter_init(dev)) /* maybe close it */ - netif_wake_queue(dev); -} - -/******************************************************* - * - * Copy a buffer to the adapter transmit page memory. - * Start sending. - */ -static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - unsigned long flags; - int len; - byte *buffer = skb->data; - byte using_txbuf; - - using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */ - - netif_stop_queue(dev); - - - if ((len = skb->len) < RUNT) - len = RUNT; - if (len & 1) /* send an even number of bytes */ - ++len; - - /* Start real output */ - - spin_lock_irqsave(&de620_lock, flags); - pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n", - (int)skb->len, using_txbuf); - - /* select a free tx buffer. if there is one... */ - switch (using_txbuf) { - default: /* both are free: use TXBF0 */ - case TXBF1: /* use TXBF0 */ - de620_send_command(dev,W_CR | RW0); - using_txbuf |= TXBF0; - break; - - case TXBF0: /* use TXBF1 */ - de620_send_command(dev,W_CR | RW1); - using_txbuf |= TXBF1; - break; - - case (TXBF0 | TXBF1): /* NONE!!! */ - printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name); - spin_unlock_irqrestore(&de620_lock, flags); - return NETDEV_TX_BUSY; - } - de620_write_block(dev, buffer, skb->len, len-skb->len); - - if(!(using_txbuf == (TXBF0 | TXBF1))) - netif_wake_queue(dev); - - dev->stats.tx_packets++; - spin_unlock_irqrestore(&de620_lock, flags); - dev_kfree_skb (skb); - return NETDEV_TX_OK; -} - -/***************************************************** - * - * Handle the network interface interrupts. 
- * - */ -static irqreturn_t -de620_interrupt(int irq_in, void *dev_id) -{ - struct net_device *dev = dev_id; - byte irq_status; - int bogus_count = 0; - int again = 0; - - spin_lock(&de620_lock); - - /* Read the status register (_not_ the status port) */ - irq_status = de620_get_register(dev, R_STS); - - pr_debug("de620_interrupt (%2.2X)\n", irq_status); - - if (irq_status & RXGOOD) { - do { - again = de620_rx_intr(dev); - pr_debug("again=%d\n", again); - } - while (again && (++bogus_count < 100)); - } - - if(de620_tx_buffs(dev) != (TXBF0 | TXBF1)) - netif_wake_queue(dev); - - spin_unlock(&de620_lock); - return IRQ_HANDLED; -} - -/************************************** - * - * Get a packet from the adapter - * - * Send it "upstairs" - * - */ -static int de620_rx_intr(struct net_device *dev) -{ - struct header_buf { - byte status; - byte Rx_NextPage; - unsigned short Rx_ByteCount; - } header_buf; - struct sk_buff *skb; - int size; - byte *buffer; - byte pagelink; - byte curr_page; - - pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page); - - /* Tell the adapter that we are going to read data, and from where */ - de620_send_command(dev, W_CR | RRN); - de620_set_register(dev, W_RSA1, next_rx_page); - de620_set_register(dev, W_RSA0, 0); - - /* Deep breath, and away we goooooo */ - de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf)); - pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n", - header_buf.status, header_buf.Rx_NextPage, - header_buf.Rx_ByteCount); - - /* Plausible page header? */ - pagelink = header_buf.Rx_NextPage; - if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) { - /* Ouch... Forget it! Skip all and start afresh... */ - printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name); - /* You win some, you lose some. And sometimes plenty... */ - adapter_init(dev); - netif_wake_queue(dev); - dev->stats.rx_over_errors++; - return 0; - } - - /* OK, this look good, so far. Let's see if it's consistent... */ - /* Let's compute the start of the next packet, based on where we are */ - pagelink = next_rx_page + - ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8); - - /* Are we going to wrap around the page counter? */ - if (pagelink > last_rx_page) - pagelink -= (last_rx_page - first_rx_page + 1); - - /* Is the _computed_ next page number equal to what the adapter says? */ - if (pagelink != header_buf.Rx_NextPage) { - /* Naah, we'll skip this packet. Probably bogus data as well */ - printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name); - next_rx_page = header_buf.Rx_NextPage; /* at least a try... */ - de620_send_command(dev, W_DUMMY); - de620_set_register(dev, W_NPRF, next_rx_page); - dev->stats.rx_over_errors++; - return 0; - } - next_rx_page = pagelink; - - size = header_buf.Rx_ByteCount - 4; - if ((size < RUNT) || (GIANT < size)) { - printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size); - } - else { /* Good packet? */ - skb = netdev_alloc_skb(dev, size + 2); - if (skb == NULL) { /* Yeah, but no place to put it... */ - printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); - dev->stats.rx_dropped++; - } - else { /* Yep! Go get it! 
*/ - skb_reserve(skb,2); /* Align */ - /* skb->data points to the start of sk_buff data area */ - buffer = skb_put(skb,size); - /* copy the packet into the buffer */ - de620_read_block(dev, buffer, size); - pr_debug("Read %d bytes\n", size); - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); /* deliver it "upstairs" */ - /* count all receives */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += size; - } - } - - /* Let's peek ahead to see if we have read the last current packet */ - /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */ - curr_page = de620_get_register(dev, R_CPR); - de620_set_register(dev, W_NPRF, next_rx_page); - pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page); - - return next_rx_page != curr_page; /* That was slightly tricky... */ -} - -/********************************************* - * - * Reset the adapter to a known state - * - */ -static int adapter_init(struct net_device *dev) -{ - int i; - static int was_down; - - if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */ - EIPRegister = NCTL0; - if (nic_data.Media != 1) - EIPRegister |= NIS0; /* not BNC */ - } - else if (nic_data.Model == 2) { /* UTP */ - EIPRegister = NCTL0 | NIS0; - } - - if (utp) - EIPRegister = NCTL0 | NIS0; - if (bnc) - EIPRegister = NCTL0; - - de620_send_command(dev, W_CR | RNOP | CLEAR); - de620_send_command(dev, W_CR | RNOP); - - de620_set_register(dev, W_SCR, SCR_DEF); - /* disable recv to wait init */ - de620_set_register(dev, W_TCR, RXOFF); - - /* Set the node ID in the adapter */ - for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */ - de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]); - } - - de620_set_register(dev, W_EIP, EIPRegister); - - next_rx_page = first_rx_page = DE620_RX_START_PAGE; - if (nic_data.RAM_Size) - last_rx_page = nic_data.RAM_Size - 1; - else /* 64k RAM */ - last_rx_page = 255; - - de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/ - de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */ - de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/ - de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/ - de620_send_command(dev, W_DUMMY); - de620_set_delay(dev); - - /* Final sanity check: Anybody out there? */ - /* Let's hope some bits from the statusregister make a good check */ -#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 ) -#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 ) - /* success: X 0 0 X 0 0 X X */ - /* ignore: EEDI RXGOOD COLS LNKS*/ - - if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) { - printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it" -#ifdef SHUTDOWN_WHEN_LOST - " and do a new ifconfig" -#endif - "! (%02x)\n", dev->name, i); -#ifdef SHUTDOWN_WHEN_LOST - /* Goodbye, cruel world... */ - dev->flags &= ~IFF_UP; - de620_close(dev); -#endif - was_down = 1; - return 1; /* failed */ - } - if (was_down) { - printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name); - was_down = 0; - } - - /* All OK, go ahead... 
*/ - de620_set_register(dev, W_TCR, TCR_DEF); - - return 0; /* all ok */ -} - -static const struct net_device_ops de620_netdev_ops = { - .ndo_open = de620_open, - .ndo_stop = de620_close, - .ndo_start_xmit = de620_start_xmit, - .ndo_tx_timeout = de620_timeout, - .ndo_set_rx_mode = de620_set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/****************************************************************************** - * - * Only start-up code below - * - */ -/**************************************** - * - * Check if there is a DE-620 connected - */ -struct net_device * __init de620_probe(int unit) -{ - byte checkbyte = 0xa5; - struct net_device *dev; - int err = -ENOMEM; - int i; - - dev = alloc_etherdev(0); - if (!dev) - goto out; - - spin_lock_init(&de620_lock); - - /* - * This is where the base_addr and irq gets set. - * Tunable at compile-time and insmod-time - */ - dev->base_addr = io; - dev->irq = irq; - - /* allow overriding parameters on command line */ - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - pr_debug("%s", version); - - printk(KERN_INFO "D-Link DE-620 pocket adapter"); - - if (!request_region(dev->base_addr, 3, "de620")) { - printk(" io 0x%3lX, which is busy.\n", dev->base_addr); - err = -EBUSY; - goto out1; - } - - /* Initially, configure basic nibble mode, so we can read the EEPROM */ - NIC_Cmd = DEF_NIC_CMD; - de620_set_register(dev, W_EIP, EIPRegister); - - /* Anybody out there? */ - de620_set_register(dev, W_CPR, checkbyte); - checkbyte = de620_get_register(dev, R_CPR); - - if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) { - printk(" not identified in the printer port\n"); - err = -ENODEV; - goto out2; - } - - /* else, got it! */ - dev->dev_addr[0] = nic_data.NodeID[0]; - for (i = 1; i < ETH_ALEN; i++) { - dev->dev_addr[i] = nic_data.NodeID[i]; - dev->broadcast[i] = 0xff; - } - - printk(", Ethernet Address: %pM", dev->dev_addr); - - printk(" (%dk RAM,", - (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64); - - if (nic_data.Media == 1) - printk(" BNC)\n"); - else - printk(" UTP)\n"); - - dev->netdev_ops = &de620_netdev_ops; - dev->watchdog_timeo = HZ*2; - - /* base_addr and irq are already set, see above! */ - - /* dump eeprom */ - pr_debug("\nEEPROM contents:\n" - "RAM_Size = 0x%02X\n" - "NodeID = %pM\n" - "Model = %d\n" - "Media = %d\n" - "SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID, - nic_data.Model, nic_data.Media, nic_data.SCR); - - err = register_netdev(dev); - if (err) - goto out2; - return dev; - -out2: - release_region(dev->base_addr, 3); -out1: - free_netdev(dev); -out: - return ERR_PTR(err); -} - -/********************************** - * - * Read info from on-board EEPROM - * - * Note: Bitwise serial I/O to/from the EEPROM vi the status _register_! - */ -#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister); - -static unsigned short __init ReadAWord(struct net_device *dev, int from) -{ - unsigned short data; - int nbits; - - /* cs [__~~] SET SEND STATE */ - /* di [____] */ - /* sck [_~~_] */ - sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4); - - /* Send the 9-bit address from where we want to read the 16-bit word */ - for (nbits = 9; nbits > 0; --nbits, from <<= 1) { - if (from & 0x0100) { /* bit set? 
*/ - /* cs [~~~~] SEND 1 */ - /* di [~~~~] */ - /* sck [_~~_] */ - sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6); - } - else { - /* cs [~~~~] SEND 0 */ - /* di [____] */ - /* sck [_~~_] */ - sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4); - } - } - - /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */ - for (data = 0, nbits = 16; nbits > 0; --nbits) { - /* cs [~~~~] SEND 0 */ - /* di [____] */ - /* sck [_~~_] */ - sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4); - data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7); - } - /* cs [____] RESET SEND STATE */ - /* di [____] */ - /* sck [_~~_] */ - sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0); - - return data; -} - -static int __init read_eeprom(struct net_device *dev) -{ - unsigned short wrd; - - /* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */ - wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */ - if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */ - return -1; /* Nope, not a DE-620 */ - nic_data.NodeID[0] = wrd & 0xff; - nic_data.NodeID[1] = wrd >> 8; - - wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */ - if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */ - return -1; /* Nope, not a DE-620 */ - nic_data.NodeID[2] = wrd & 0xff; - nic_data.NodeID[3] = wrd >> 8; - - wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */ - nic_data.NodeID[4] = wrd & 0xff; - nic_data.NodeID[5] = wrd >> 8; - - wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */ - nic_data.RAM_Size = (wrd >> 8); - - wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */ - nic_data.Model = (wrd & 0xff); - - wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */ - nic_data.Media = (wrd & 0xff); - - wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */ - nic_data.SCR = (wrd >> 8); - - return 0; /* no errors */ -} - -/****************************************************************************** - * - * Loadable module skeleton - * - */ -#ifdef MODULE -static struct net_device *de620_dev; - -int __init init_module(void) -{ - de620_dev = de620_probe(-1); - if (IS_ERR(de620_dev)) - return PTR_ERR(de620_dev); - return 0; -} - -void cleanup_module(void) -{ - unregister_netdev(de620_dev); - release_region(de620_dev->base_addr, 3); - free_netdev(de620_dev); -} -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/dlink/de620.h b/drivers/net/ethernet/dlink/de620.h deleted file mode 100644 index e8d9a88f4cb5..000000000000 --- a/drivers/net/ethernet/dlink/de620.h +++ /dev/null @@ -1,117 +0,0 @@ -/********************************************************* - * * - * Definition of D-Link DE-620 Ethernet Pocket adapter * - * * - *********************************************************/ - -/* DE-620's CMD port Command */ -#define CS0 0x08 /* 1->0 command strobe */ -#define ICEN 0x04 /* 0=enable DL3520 host interface */ -#define DS0 0x02 /* 1->0 data strobe 0 */ -#define DS1 0x01 /* 1->0 data strobe 1 */ - -#define WDIR 0x20 /* general 0=read 1=write */ -#define RDIR 0x00 /* (not 100% confirm ) */ -#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */ -#define PS2RDIR 0x20 - -#define IRQEN 0x10 /* 1 = enable printer IRQ line */ -#define SELECTIN 0x08 /* 1 = select printer */ -#define INITP 0x04 /* 0 = initial printer */ -#define AUTOFEED 0x02 /* 1 = printer auto form feed */ -#define STROBE 0x01 /* 0->1 data strobe */ - -#define RESET 0x08 -#define NIS0 
0x20 /* 0 = BNC, 1 = UTP */ -#define NCTL0 0x10 - -/* DE-620 DIC Command */ -#define W_DUMMY 0x00 /* DIC reserved command */ -#define W_CR 0x20 /* DIC write command register */ -#define W_NPR 0x40 /* DIC write Next Page Register */ -#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */ -#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */ - -/* DE-620's STAT port bits 7-4 */ -#define EMPTY 0x80 /* 1 = receive buffer empty */ -#define INTLEVEL 0x40 /* 1 = interrupt level is high */ -#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */ -#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */ -#define READY 0x08 /* 1 = h/w ready to accept cmd/data */ - -/* IDC 1 Command */ -#define W_RSA1 0xa0 /* write remote start address 1 */ -#define W_RSA0 0xa1 /* write remote start address 0 */ -#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */ -#define W_DFR 0xa3 /* write delay factor register */ -#define W_CPR 0xa4 /* write current page register */ -#define W_SPR 0xa5 /* write start page register */ -#define W_EPR 0xa6 /* write end page register */ -#define W_SCR 0xa7 /* write system configuration register */ -#define W_TCR 0xa8 /* write Transceiver Configuration reg */ -#define W_EIP 0xa9 /* write EEPM Interface port */ -#define W_PAR0 0xaa /* write physical address register 0 */ -#define W_PAR1 0xab /* write physical address register 1 */ -#define W_PAR2 0xac /* write physical address register 2 */ -#define W_PAR3 0xad /* write physical address register 3 */ -#define W_PAR4 0xae /* write physical address register 4 */ -#define W_PAR5 0xaf /* write physical address register 5 */ - -/* IDC 2 Command */ -#define R_STS 0xc0 /* read status register */ -#define R_CPR 0xc1 /* read current page register */ -#define R_BPR 0xc2 /* read boundary page register */ -#define R_TDR 0xc3 /* read time domain reflectometry reg */ - -/* STATUS Register */ -#define EEDI 0x80 /* EEPM DO pin */ -#define TXSUC 0x40 /* tx success */ -#define T16 0x20 /* tx fail 16 times */ -#define TS1 0x40 /* 0=Tx success, 1=T16 */ -#define TS0 0x20 /* 0=Tx success, 1=T16 */ -#define RXGOOD 0x10 /* rx a good packet */ -#define RXCRC 0x08 /* rx a CRC error packet */ -#define RXSHORT 0x04 /* rx a short packet */ -#define COLS 0x02 /* coaxial collision status */ -#define LNKS 0x01 /* UTP link status */ - -/* Command Register */ -#define CLEAR 0x10 /* reset part of hardware */ -#define NOPER 0x08 /* No Operation */ -#define RNOP 0x08 -#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */ -#define RRN 0x04 /* Normal Remote Read mode */ -#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */ -#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */ -#define TXEN 0x01 /* 0->1 tx enable */ - -/* System Configuration Register */ -#define TESTON 0x80 /* test host data transfer reliability */ -#define SLEEP 0x40 /* sleep mode */ -#if 0 -#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */ -#define BYTEMODE 0x02 /* byte mode */ -#else -#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */ -#define BYTEMODE 0x10 /* byte mode */ -#endif -#define NIBBLEMODE 0x00 /* nibble mode */ -#define IRQINV 0x08 /* turn off IRQ line inverter */ -#define IRQNML 0x00 /* turn on IRQ line inverter */ -#define INTON 0x04 -#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */ -#define AUTOTX 0x01 /* auto tx when leave RW mode */ - -/* Transceiver Configuration Register */ -#define JABBER 0x80 /* generate jabber condition */ -#define TXSUCINT 0x40 /* enable tx success interrupt */ -#define T16INT 
0x20 /* enable T16 interrupt */ -#define RXERRPKT 0x10 /* accept CRC error or short packet */ -#define EXTERNALB2 0x0C /* external loopback 2 */ -#define EXTERNALB1 0x08 /* external loopback 1 */ -#define INTERNALB 0x04 /* internal loopback */ -#define NMLOPERATE 0x00 /* normal operation */ -#define RXPBM 0x03 /* rx physical, broadcast, multicast */ -#define RXPB 0x02 /* rx physical, broadcast */ -#define RXALL 0x01 /* rx all packet */ -#define RXOFF 0x00 /* rx disable */ diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 1d342d37915c..110d26f4c602 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1156,9 +1156,10 @@ set_multicast (struct net_device *dev) static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); - strcpy(info->driver, "dl2k"); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(np->pdev)); + + strlcpy(info->driver, "dl2k", sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 28fc11b2f1ea..50d9c6315930 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -530,7 +530,6 @@ static int sundance_probe1(struct pci_dev *pdev, for (i = 0; i < 3; i++) ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); np = netdev_priv(dev); np->base = ioaddr; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 2c177b329c8b..f3d60eb13c3a 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -281,11 +281,11 @@ static int dnet_mii_probe(struct net_device *dev) /* attach the mac to the phy */ if (bp->capabilities & DNET_HAS_RMII) { phydev = phy_connect(dev, dev_name(&phydev->dev), - &dnet_handle_link_change, 0, + &dnet_handle_link_change, PHY_INTERFACE_MODE_RMII); } else { phydev = phy_connect(dev, dev_name(&phydev->dev), - &dnet_handle_link_change, 0, + &dnet_handle_link_change, PHY_INTERFACE_MODE_MII); } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index f1b3df167ff2..28ceb8414185 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -34,7 +34,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "4.4.161.0u" +#define DRV_VER "4.6.62.0u" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 8a250c38fb82..071aea79d218 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -93,13 +93,16 @@ static void be_mcc_notify(struct be_adapter *adapter) * little endian) */ static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) { + u32 flags; + if (compl->flags != 0) { - compl->flags = le32_to_cpu(compl->flags); - BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); - return true; - } else { - return false; + flags = le32_to_cpu(compl->flags); + if (flags & CQE_FLAGS_VALID_MASK) { + compl->flags = flags; + return true; + } } + return false; } /* Need to reset the entire 
word that houses the valid bit */ @@ -3138,6 +3141,39 @@ err: return status; } +int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, + int vf_num) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_iface_list *req; + struct be_cmd_resp_get_iface_list *resp; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), + wrb, NULL); + req->hdr.domain = vf_num + 1; + + status = be_mcc_notify_wait(adapter); + if (!status) { + resp = (struct be_cmd_resp_get_iface_list *)req; + vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + /* Uses sync mcc */ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d6552e19ffee..96970860c915 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -203,6 +203,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_FN_PRIVILEGES 170 #define OPCODE_COMMON_READ_OBJECT 171 #define OPCODE_COMMON_WRITE_OBJECT 172 +#define OPCODE_COMMON_GET_IFACE_LIST 194 #define OPCODE_COMMON_ENABLE_DISABLE_VF 196 #define OPCODE_ETH_RSS_CONFIG 1 @@ -1795,6 +1796,23 @@ static inline bool check_privilege(struct be_adapter *adapter, u32 flags) return flags & adapter->cmd_privileges ? true : false; } +/************** Get IFACE LIST *******************/ +struct be_if_desc { + u32 if_id; + u32 cap_flags; + u32 en_flags; +}; + +struct be_cmd_req_get_iface_list { + struct be_cmd_req_hdr hdr; +}; + +struct be_cmd_resp_get_iface_list { + struct be_cmd_req_hdr hdr; + u32 if_cnt; + struct be_if_desc if_desc; +}; + extern int be_pci_fnum_get(struct be_adapter *adapter); extern int be_fw_wait_ready(struct be_adapter *adapter); extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, @@ -1917,4 +1935,6 @@ extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain); +extern int be_cmd_get_if_id(struct be_adapter *adapter, + struct be_vf_cfg *vf_cfg, int vf_num); extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 00454a10f88d..76b302f30c87 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -183,12 +183,12 @@ static void be_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); - strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN); - if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) { - strcat(drvinfo->fw_version, " ["); - strcat(drvinfo->fw_version, fw_on_flash); - strcat(drvinfo->fw_version, "]"); - } + if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN)) + strlcpy(drvinfo->fw_version, adapter->fw_ver, + sizeof(drvinfo->fw_version)); + else + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%s [%s]", adapter->fw_ver, fw_on_flash); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c 
b/drivers/net/ethernet/emulex/benet/be_main.c index 4d6f3c54427a..3860888ac711 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2597,7 +2597,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) * These addresses are programmed in the ASIC by the PF and the VF driver * queries for the MAC address during its probe. */ -static inline int be_vf_eth_addr_config(struct be_adapter *adapter) +static int be_vf_eth_addr_config(struct be_adapter *adapter) { u32 vf; int status = 0; @@ -2626,13 +2626,34 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter) return status; } +static int be_vfs_mac_query(struct be_adapter *adapter) +{ + int status, vf; + u8 mac[ETH_ALEN]; + struct be_vf_cfg *vf_cfg; + bool active; + + for_all_vfs(adapter, vf_cfg, vf) { + be_cmd_get_mac_from_list(adapter, mac, &active, + &vf_cfg->pmac_id, 0); + + status = be_cmd_mac_addr_query(adapter, mac, false, + vf_cfg->if_handle, 0); + if (status) + return status; + memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); + } + return 0; +} + static void be_vf_clear(struct be_adapter *adapter) { struct be_vf_cfg *vf_cfg; u32 vf; if (be_find_vfs(adapter, ASSIGNED)) { - dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n"); + dev_warn(&adapter->pdev->dev, + "VFs are assigned to VMs: not disabling VFs\n"); goto done; } @@ -2681,21 +2702,29 @@ static int be_clear(struct be_adapter *adapter) return 0; } -static void be_get_vf_if_cap_flags(struct be_adapter *adapter, - u32 *cap_flags, u8 domain) +static int be_vfs_if_create(struct be_adapter *adapter) { - bool profile_present = false; + struct be_vf_cfg *vf_cfg; + u32 cap_flags, en_flags, vf; int status; - if (lancer_chip(adapter)) { - status = be_cmd_get_profile_config(adapter, cap_flags, domain); - if (!status) - profile_present = true; - } + cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST; - if (!profile_present) - *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST; + for_all_vfs(adapter, vf_cfg, vf) { + if (!BE3_chip(adapter)) + be_cmd_get_profile_config(adapter, &cap_flags, vf + 1); + + /* If a FW profile exists, then cap_flags are updated */ + en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST); + status = be_cmd_if_create(adapter, cap_flags, en_flags, + &vf_cfg->if_handle, vf + 1); + if (status) + goto err; + } +err: + return status; } static int be_vf_setup_init(struct be_adapter *adapter) @@ -2718,65 +2747,70 @@ static int be_vf_setup_init(struct be_adapter *adapter) static int be_vf_setup(struct be_adapter *adapter) { struct be_vf_cfg *vf_cfg; - struct device *dev = &adapter->pdev->dev; - u32 cap_flags, en_flags, vf; u16 def_vlan, lnk_speed; - int status, enabled_vfs; - - enabled_vfs = be_find_vfs(adapter, ENABLED); - if (enabled_vfs) { - dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs); - dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs); - return 0; - } - - if (num_vfs > adapter->dev_num_vfs) { - dev_warn(dev, "Device supports %d VFs and not %d\n", - adapter->dev_num_vfs, num_vfs); - num_vfs = adapter->dev_num_vfs; - } + int status, old_vfs, vf; + struct device *dev = &adapter->pdev->dev; - status = pci_enable_sriov(adapter->pdev, num_vfs); - if (!status) { - adapter->num_vfs = num_vfs; + old_vfs = be_find_vfs(adapter, ENABLED); + if (old_vfs) { + dev_info(dev, "%d VFs are already enabled\n", old_vfs); + if (old_vfs != num_vfs) + dev_warn(dev, "Ignoring num_vfs=%d 
setting\n", num_vfs); + adapter->num_vfs = old_vfs; } else { - /* Platform doesn't support SRIOV though device supports it */ - dev_warn(dev, "SRIOV enable failed\n"); - return 0; + if (num_vfs > adapter->dev_num_vfs) + dev_info(dev, "Device supports %d VFs and not %d\n", + adapter->dev_num_vfs, num_vfs); + adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs); + + status = pci_enable_sriov(adapter->pdev, num_vfs); + if (status) { + dev_err(dev, "SRIOV enable failed\n"); + adapter->num_vfs = 0; + return 0; + } } status = be_vf_setup_init(adapter); if (status) goto err; - for_all_vfs(adapter, vf_cfg, vf) { - be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1); - - en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | - BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST); - - status = be_cmd_if_create(adapter, cap_flags, en_flags, - &vf_cfg->if_handle, vf + 1); + if (old_vfs) { + for_all_vfs(adapter, vf_cfg, vf) { + status = be_cmd_get_if_id(adapter, vf_cfg, vf); + if (status) + goto err; + } + } else { + status = be_vfs_if_create(adapter); if (status) goto err; } - if (!enabled_vfs) { + if (old_vfs) { + status = be_vfs_mac_query(adapter); + if (status) + goto err; + } else { status = be_vf_eth_addr_config(adapter); if (status) goto err; } for_all_vfs(adapter, vf_cfg, vf) { - lnk_speed = 1000; - status = be_cmd_set_qos(adapter, lnk_speed, vf + 1); - if (status) - goto err; - vf_cfg->tx_rate = lnk_speed * 10; + /* BE3 FW, by default, caps VF TX-rate to 100mbps. + * Allow full available bandwidth + */ + if (BE3_chip(adapter) && !old_vfs) + be_cmd_set_qos(adapter, 1000, vf+1); + + status = be_cmd_link_status_query(adapter, &lnk_speed, + NULL, vf + 1); + if (!status) + vf_cfg->tx_rate = lnk_speed; status = be_cmd_get_hsw_config(adapter, &def_vlan, - vf + 1, vf_cfg->if_handle); + vf + 1, vf_cfg->if_handle); if (status) goto err; vf_cfg->def_vid = def_vlan; @@ -2785,6 +2819,8 @@ static int be_vf_setup(struct be_adapter *adapter) } return 0; err: + dev_err(dev, "VF setup failed\n"); + be_vf_clear(adapter); return status; } @@ -2838,12 +2874,12 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, static void be_get_resources(struct be_adapter *adapter) { - int status; + u16 dev_num_vfs; + int pos, status; bool profile_present = false; - if (lancer_chip(adapter)) { + if (!BEx_chip(adapter)) { status = be_cmd_get_func_config(adapter); - if (!status) profile_present = true; } @@ -2899,13 +2935,21 @@ static void be_get_resources(struct be_adapter *adapter) if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) adapter->if_cap_flags |= BE_IF_FLAGS_RSS; } + + pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, + &dev_num_vfs); + if (BE3_chip(adapter)) + dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); + adapter->dev_num_vfs = dev_num_vfs; + } } /* Routine to query per function resource limits */ static int be_get_config(struct be_adapter *adapter) { - int pos, status; - u16 dev_num_vfs; + int status; status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, &adapter->function_mode, @@ -2923,14 +2967,6 @@ static int be_get_config(struct be_adapter *adapter) goto err; } - pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); - if (pos) { - pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, - &dev_num_vfs); - if (!lancer_chip(adapter)) - dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); - adapter->dev_num_vfs = dev_num_vfs; - } err: return status; } diff --git 
a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 8db1c06008de..5722bc61fa58 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -206,7 +206,7 @@ struct ethoc { unsigned int num_rx; unsigned int cur_rx; - void** vma; + void **vma; struct net_device *netdev; struct napi_struct napi; @@ -292,7 +292,7 @@ static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start) { struct ethoc_bd bd; int i; - void* vma; + void *vma; dev->cur_tx = 0; dev->dty_tx = 0; @@ -447,8 +447,8 @@ static int ethoc_rx(struct net_device *dev, int limit) netif_receive_skb(skb); } else { if (net_ratelimit()) - dev_warn(&dev->dev, "low on memory - " - "packet dropped\n"); + dev_warn(&dev->dev, + "low on memory - packet dropped\n"); dev->stats.rx_dropped++; break; @@ -555,9 +555,8 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id) pending = ethoc_read(priv, INT_SOURCE); pending &= mask; - if (unlikely(pending == 0)) { + if (unlikely(pending == 0)) return IRQ_NONE; - } ethoc_ack_irq(priv, pending); @@ -620,7 +619,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); - for (i=0; i < 5; i++) { + for (i = 0; i < 5; i++) { u32 status = ethoc_read(priv, MIISTATUS); if (!(status & MIISTATUS_BUSY)) { u32 data = ethoc_read(priv, MIIRX_DATA); @@ -628,7 +627,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) ethoc_write(priv, MIICOMMAND, 0); return data; } - usleep_range(100,200); + usleep_range(100, 200); } return -EBUSY; @@ -643,14 +642,14 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) ethoc_write(priv, MIITX_DATA, val); ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); - for (i=0; i < 5; i++) { + for (i = 0; i < 5; i++) { u32 stat = ethoc_read(priv, MIISTATUS); if (!(stat & MIISTATUS_BUSY)) { /* reset MII command register */ ethoc_write(priv, MIICOMMAND, 0); return 0; } - usleep_range(100,200); + usleep_range(100, 200); } return -EBUSY; @@ -671,19 +670,18 @@ static int ethoc_mdio_probe(struct net_device *dev) struct phy_device *phy; int err; - if (priv->phy_id != -1) { + if (priv->phy_id != -1) phy = priv->mdio->phy_map[priv->phy_id]; - } else { + else phy = phy_find_first(priv->mdio); - } if (!phy) { dev_err(&dev->dev, "no PHY found\n"); return -ENXIO; } - err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0, - PHY_INTERFACE_MODE_GMII); + err = phy_connect_direct(dev, phy, ethoc_mdio_poll, + PHY_INTERFACE_MODE_GMII); if (err) { dev_err(&dev->dev, "could not attach to PHY\n"); return err; @@ -771,21 +769,24 @@ static int ethoc_config(struct net_device *dev, struct ifmap *map) return -ENOSYS; } -static int ethoc_set_mac_address(struct net_device *dev, void *addr) +static void ethoc_do_set_mac_address(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); - u8 *mac = (u8 *)addr; - - if (!is_valid_ether_addr(mac)) - return -EADDRNOTAVAIL; + unsigned char *mac = dev->dev_addr; ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | (mac[5] << 0)); ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0)); +} - memcpy(dev->dev_addr, mac, ETH_ALEN); - dev->addr_assign_type &= ~NET_ADDR_RANDOM; +static int ethoc_set_mac_address(struct net_device *dev, void *p) +{ + const struct sockaddr *addr = p; + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + ethoc_do_set_mac_address(dev); return 0; } @@ -1022,7 
+1023,7 @@ static int ethoc_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n", priv->num_tx, priv->num_rx); - priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); + priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL); if (!priv->vma) { ret = -ENOMEM; goto error; @@ -1038,7 +1039,7 @@ static int ethoc_probe(struct platform_device *pdev) #ifdef CONFIG_OF { - const uint8_t* mac; + const uint8_t *mac; mac = of_get_property(pdev->dev.of_node, "local-mac-address", @@ -1050,25 +1051,23 @@ static int ethoc_probe(struct platform_device *pdev) } /* Check that the given MAC address is valid. If it isn't, read the - * current MAC from the controller. */ + * current MAC from the controller. + */ if (!is_valid_ether_addr(netdev->dev_addr)) ethoc_get_mac_address(netdev, netdev->dev_addr); /* Check the MAC again for validity, if it still isn't choose and - * program a random one. */ + * program a random one. + */ if (!is_valid_ether_addr(netdev->dev_addr)) { eth_random_addr(netdev->dev_addr); random_mac = true; } - ret = ethoc_set_mac_address(netdev, netdev->dev_addr); - if (ret) { - dev_err(&netdev->dev, "failed to set MAC address\n"); - goto error; - } + ethoc_do_set_mac_address(netdev); if (random_mac) - netdev->addr_assign_type |= NET_ADDR_RANDOM; + netdev->addr_assign_type = NET_ADDR_RANDOM; /* register MII bus */ priv->mdio = mdiobus_alloc(); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 74d749e29aab..7c361d1db94c 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -858,8 +858,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv) } phydev = phy_connect(netdev, dev_name(&phydev->dev), - &ftgmac100_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); + &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phydev)) { netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); @@ -955,9 +954,9 @@ static int ftgmac100_mdiobus_reset(struct mii_bus *bus) static void ftgmac100_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, dev_name(&netdev->dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } static int ftgmac100_get_settings(struct net_device *netdev, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index b901a01e3fa5..b5ea8fbd8a76 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -820,9 +820,9 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg, static void ftmac100_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, dev_name(&netdev->dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ec490d741fc0..6048dc8604ee 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ 
b/drivers/net/ethernet/freescale/Kconfig @@ -26,6 +26,7 @@ config FEC ARCH_MXC || SOC_IMX28) default ARCH_MXC || SOC_IMX28 if ARM select PHYLIB + select PTP_1588_CLOCK ---help--- Say Y here if you want to use the built-in 10/100 Fast ethernet controller on some Motorola ColdFire and Freescale i.MX processors. @@ -92,12 +93,4 @@ config GIANFAR This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, and MPC86xx family of chips, and the FEC on the 8540. -config FEC_PTP - bool "PTP Hardware Clock (PHC)" - depends on FEC && ARCH_MXC && !SOC_IMX25 && !SOC_IMX27 && !SOC_IMX35 && !SOC_IMX5 - select PTP_1588_CLOCK - --help--- - Say Y here if you want to use PTP Hardware Clock (PHC) in the - driver. Only the basic clock operations have been implemented. - endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index d4d19b3d00ae..b7d58fe6f531 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -2,8 +2,7 @@ # Makefile for the Freescale network device drivers. # -obj-$(CONFIG_FEC) += fec.o -obj-$(CONFIG_FEC_PTP) += fec_ptp.o +obj-$(CONFIG_FEC) += fec.o fec_ptp.o obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 0704bcab178a..29d82cf1528e 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -67,6 +67,15 @@ #endif #define DRIVER_NAME "fec" +#define FEC_NAPI_WEIGHT 64 + +/* Pause frame feild and FIFO threshold */ +#define FEC_ENET_FCE (1 << 5) +#define FEC_ENET_RSEM_V 0x84 +#define FEC_ENET_RSFL_V 16 +#define FEC_ENET_RAEM_V 0x8 +#define FEC_ENET_RAFL_V 0x8 +#define FEC_ENET_OPD_V 0xFFF0 /* Controller is ENET-MAC */ #define FEC_QUIRK_ENET_MAC (1 << 0) @@ -76,6 +85,8 @@ #define FEC_QUIRK_USE_GASKET (1 << 2) /* Controller has GBIT support */ #define FEC_QUIRK_HAS_GBIT (1 << 3) +/* Controller has extend desc buffer */ +#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) static struct platform_device_id fec_devtype[] = { { @@ -93,7 +104,8 @@ static struct platform_device_id fec_devtype[] = { .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, }, { .name = "imx6q-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT, + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX, }, { /* sentinel */ } @@ -140,7 +152,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif #endif /* CONFIG_M5272 */ -#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) +#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE) #error "FEC: descriptor ring size constants too large" #endif @@ -157,6 +169,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) +#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) /* The FEC stores dest/src/type, data, and checksum for receive packets. 
*/ @@ -190,8 +203,29 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); /* Transmitter timeout */ #define TX_TIMEOUT (2 * HZ) +#define FEC_PAUSE_FLAG_AUTONEG 0x1 +#define FEC_PAUSE_FLAG_ENABLE 0x2 + static int mii_cnt; +static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex) +{ + struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; + if (is_ex) + return (struct bufdesc *)(ex + 1); + else + return bdp + 1; +} + +static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex) +{ + struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; + if (is_ex) + return (struct bufdesc *)(ex - 1); + else + return bdp - 1; +} + static void *swap_buffer(void *bufaddr, int len) { int i; @@ -248,7 +282,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) */ if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { unsigned int index; - index = bdp - fep->tx_bd_base; + if (fep->bufdesc_ex) + index = (struct bufdesc_ex *)bdp - + (struct bufdesc_ex *)fep->tx_bd_base; + else + index = bdp - fep->tx_bd_base; memcpy(fep->tx_bounce[index], skb->data, skb->len); bufaddr = fep->tx_bounce[index]; } @@ -280,17 +318,19 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | BD_ENET_TX_LAST | BD_ENET_TX_TC); bdp->cbd_sc = status; -#ifdef CONFIG_FEC_PTP - bdp->cbd_bdu = 0; - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + if (fep->bufdesc_ex) { + + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_bdu = 0; + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && fep->hwts_tx_en)) { - bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT); + ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT); skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - } else { + } else { - bdp->cbd_esc = BD_ENET_TX_INT; + ebdp->cbd_esc = BD_ENET_TX_INT; + } } -#endif /* Trigger transmission start */ writel(0, fep->hwp + FEC_X_DES_ACTIVE); @@ -298,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (status & BD_ENET_TX_WRAP) bdp = fep->tx_bd_base; else - bdp++; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); if (bdp == fep->dirty_tx) { fep->tx_full = 1; @@ -359,8 +399,12 @@ fec_restart(struct net_device *ndev, int duplex) /* Set receive and transmit descriptor base. 
*/ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, - fep->hwp + FEC_X_DES_START); + if (fep->bufdesc_ex) + writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) + * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); + else + writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) + * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; fep->cur_rx = fep->rx_bd_base; @@ -439,6 +483,25 @@ fec_restart(struct net_device *ndev, int duplex) } #endif } + + /* enable pause frame*/ + if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || + ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && + fep->phy_dev && fep->phy_dev->pause)) { + rcntl |= FEC_ENET_FCE; + + /* set FIFO thresh hold parameter to reduce overrun */ + writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); + writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); + writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); + writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); + + /* OPD */ + writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); + } else { + rcntl &= ~FEC_ENET_FCE; + } + writel(rcntl, fep->hwp + FEC_R_CNTRL); if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { @@ -448,17 +511,16 @@ fec_restart(struct net_device *ndev, int duplex) writel(1 << 8, fep->hwp + FEC_X_WMRK); } -#ifdef CONFIG_FEC_PTP - ecntl |= (1 << 4); -#endif + if (fep->bufdesc_ex) + ecntl |= (1 << 4); /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); writel(0, fep->hwp + FEC_R_DES_ACTIVE); -#ifdef CONFIG_FEC_PTP - fec_ptp_start_cyclecounter(ndev); -#endif + if (fep->bufdesc_ex) + fec_ptp_start_cyclecounter(ndev); + /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } @@ -544,19 +606,20 @@ fec_enet_tx(struct net_device *ndev) ndev->stats.tx_packets++; } -#ifdef CONFIG_FEC_PTP - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && + fep->bufdesc_ex) { struct skb_shared_hwtstamps shhwtstamps; unsigned long flags; + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; memset(&shhwtstamps, 0, sizeof(shhwtstamps)); spin_lock_irqsave(&fep->tmreg_lock, flags); shhwtstamps.hwtstamp = ns_to_ktime( - timecounter_cyc2time(&fep->tc, bdp->ts)); + timecounter_cyc2time(&fep->tc, ebdp->ts)); spin_unlock_irqrestore(&fep->tmreg_lock, flags); skb_tstamp_tx(skb, &shhwtstamps); } -#endif + if (status & BD_ENET_TX_READY) printk("HEY! Enet xmit interrupt and TX_READY.\n"); @@ -575,7 +638,7 @@ fec_enet_tx(struct net_device *ndev) if (status & BD_ENET_TX_WRAP) bdp = fep->tx_bd_base; else - bdp++; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); /* Since we have freed up a buffer, the ring is no longer full */ @@ -595,8 +658,8 @@ fec_enet_tx(struct net_device *ndev) * not been given to the system, we just set the empty indicator, * effectively tossing the packet. */ -static void -fec_enet_rx(struct net_device *ndev) +static int +fec_enet_rx(struct net_device *ndev, int budget) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = @@ -606,13 +669,12 @@ fec_enet_rx(struct net_device *ndev) struct sk_buff *skb; ushort pkt_len; __u8 *data; + int pkt_received = 0; #ifdef CONFIG_M532x flush_cache_all(); #endif - spin_lock(&fep->hw_lock); - /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. 
*/ @@ -620,6 +682,10 @@ fec_enet_rx(struct net_device *ndev) while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + if (pkt_received >= budget) + break; + pkt_received++; + /* Since we have allocated space to hold a complete frame, * the last indicator should be set. */ @@ -683,23 +749,25 @@ fec_enet_rx(struct net_device *ndev) skb_put(skb, pkt_len - 4); /* Make room */ skb_copy_to_linear_data(skb, data, pkt_len - 4); skb->protocol = eth_type_trans(skb, ndev); -#ifdef CONFIG_FEC_PTP + /* Get receive timestamp from the skb */ - if (fep->hwts_rx_en) { + if (fep->hwts_rx_en && fep->bufdesc_ex) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); unsigned long flags; + struct bufdesc_ex *ebdp = + (struct bufdesc_ex *)bdp; memset(shhwtstamps, 0, sizeof(*shhwtstamps)); spin_lock_irqsave(&fep->tmreg_lock, flags); shhwtstamps->hwtstamp = ns_to_ktime( - timecounter_cyc2time(&fep->tc, bdp->ts)); + timecounter_cyc2time(&fep->tc, ebdp->ts)); spin_unlock_irqrestore(&fep->tmreg_lock, flags); } -#endif + if (!skb_defer_rx_timestamp(skb)) - netif_rx(skb); + napi_gro_receive(&fep->napi, skb); } bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, @@ -712,17 +780,19 @@ rx_processing_done: status |= BD_ENET_RX_EMPTY; bdp->cbd_sc = status; -#ifdef CONFIG_FEC_PTP - bdp->cbd_esc = BD_ENET_RX_INT; - bdp->cbd_prot = 0; - bdp->cbd_bdu = 0; -#endif + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_prot = 0; + ebdp->cbd_bdu = 0; + } /* Update BD pointer to next entry */ if (status & BD_ENET_RX_WRAP) bdp = fep->rx_bd_base; else - bdp++; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. @@ -731,7 +801,7 @@ rx_processing_done: } fep->cur_rx = bdp; - spin_unlock(&fep->hw_lock); + return pkt_received; } static irqreturn_t @@ -748,7 +818,13 @@ fec_enet_interrupt(int irq, void *dev_id) if (int_events & FEC_ENET_RXF) { ret = IRQ_HANDLED; - fec_enet_rx(ndev); + + /* Disable the RX interrupt */ + if (napi_schedule_prep(&fep->napi)) { + writel(FEC_RX_DISABLED_IMASK, + fep->hwp + FEC_IMASK); + __napi_schedule(&fep->napi); + } } /* Transmit OK, or non-fatal error. 
Update the buffer @@ -769,10 +845,21 @@ fec_enet_interrupt(int irq, void *dev_id) return ret; } +static int fec_enet_rx_napi(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + int pkts = fec_enet_rx(ndev, budget); + struct fec_enet_private *fep = netdev_priv(ndev); + if (pkts < budget) { + napi_complete(napi); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + } + return pkts; +} /* ------------------------------------------------------------------------- */ -static void __inline__ fec_get_mac(struct net_device *ndev) +static void fec_get_mac(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct fec_platform_data *pdata = fep->pdev->dev.platform_data; @@ -973,7 +1060,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) } snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id); - phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0, + phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, fep->phy_interface); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name); @@ -981,8 +1068,10 @@ static int fec_enet_mii_probe(struct net_device *ndev) } /* mask with MAC supported features */ - if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) + if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { phy_dev->supported &= PHY_GBIT_FEATURES; + phy_dev->supported |= SUPPORTED_Pause; + } else phy_dev->supported &= PHY_BASIC_FEATURES; @@ -1133,17 +1222,95 @@ static void fec_enet_get_drvinfo(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); - strcpy(info->driver, fep->pdev->dev.driver->name); - strcpy(info->version, "Revision: 1.0"); - strcpy(info->bus_info, dev_name(&ndev->dev)); + strlcpy(info->driver, fep->pdev->dev.driver->name, + sizeof(info->driver)); + strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); + strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); +} + +static int fec_enet_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->bufdesc_ex) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + if (fep->ptp_clock) + info->phc_index = ptp_clock_index(fep->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; + } else { + return ethtool_op_get_ts_info(ndev, info); + } +} + +static void fec_enet_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; + pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; + pause->rx_pause = pause->tx_pause; +} + +static int fec_enet_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (pause->tx_pause != pause->rx_pause) { + netdev_info(ndev, + "hardware only support enable/disable both tx and rx"); + return -EINVAL; + } + + fep->pause_flag = 0; + + /* tx pause must be same as rx pause */ + fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; + fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; + + if (pause->rx_pause || pause->autoneg) { + fep->phy_dev->supported |= ADVERTISED_Pause; + fep->phy_dev->advertising |= ADVERTISED_Pause; + } else { + fep->phy_dev->supported &= ~ADVERTISED_Pause; + fep->phy_dev->advertising &= ~ADVERTISED_Pause; + } + + if (pause->autoneg) { + if (netif_running(ndev)) + fec_stop(ndev); + phy_start_aneg(fep->phy_dev); + } + if (netif_running(ndev)) + fec_restart(ndev, 0); + + return 0; } static const struct ethtool_ops fec_enet_ethtool_ops = { + .get_pauseparam = fec_enet_get_pauseparam, + .set_pauseparam = fec_enet_set_pauseparam, .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .get_link = ethtool_op_get_link, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = fec_enet_get_ts_info, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -1157,10 +1324,9 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; -#ifdef CONFIG_FEC_PTP - if (cmd == SIOCSHWTSTAMP) + if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex) return fec_ptp_ioctl(ndev, rq, cmd); -#endif + return phy_mii_ioctl(phydev, rq, cmd); } @@ -1180,7 +1346,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (skb) dev_kfree_skb(skb); - bdp++; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); } bdp = fep->tx_bd_base; @@ -1207,14 +1373,17 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); bdp->cbd_sc = BD_ENET_RX_EMPTY; -#ifdef CONFIG_FEC_PTP - bdp->cbd_esc = BD_ENET_RX_INT; -#endif - bdp++; + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = BD_ENET_RX_INT; + } + + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); } /* Set the last buffer to wrap. */ - bdp--; + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); bdp->cbd_sc |= BD_SC_WRAP; bdp = fep->tx_bd_base; @@ -1224,14 +1393,16 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) bdp->cbd_sc = 0; bdp->cbd_bufaddr = 0; -#ifdef CONFIG_FEC_PTP - bdp->cbd_esc = BD_ENET_RX_INT; -#endif - bdp++; + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = BD_ENET_RX_INT; + } + + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); } /* Set the last buffer to wrap. */ - bdp--; + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -1243,6 +1414,8 @@ fec_enet_open(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; + napi_enable(&fep->napi); + /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. */ @@ -1444,24 +1617,31 @@ static int fec_enet_init(struct net_device *ndev) /* Set receive and transmit descriptor base. */ fep->rx_bd_base = cbd_base; - fep->tx_bd_base = cbd_base + RX_RING_SIZE; + if (fep->bufdesc_ex) + fep->tx_bd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE); + else + fep->tx_bd_base = cbd_base + RX_RING_SIZE; /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; ndev->netdev_ops = &fec_netdev_ops; ndev->ethtool_ops = &fec_enet_ethtool_ops; + writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); + netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); + /* Initialize the receive buffer descriptors. 
*/ bdp = fep->rx_bd_base; for (i = 0; i < RX_RING_SIZE; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; - bdp++; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); } /* Set the last buffer to wrap */ - bdp--; + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); bdp->cbd_sc |= BD_SC_WRAP; /* ...and the same for transmit */ @@ -1471,11 +1651,11 @@ static int fec_enet_init(struct net_device *ndev) /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; bdp->cbd_bufaddr = 0; - bdp++; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); } /* Set the last buffer to wrap */ - bdp--; + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); bdp->cbd_sc |= BD_SC_WRAP; fec_restart(ndev, 0); @@ -1509,22 +1689,25 @@ static void fec_reset_phy(struct platform_device *pdev) msec = 1; phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); + if (!gpio_is_valid(phy_reset)) + return; + err = devm_gpio_request_one(&pdev->dev, phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset"); if (err) { - pr_debug("FEC: failed to get gpio phy-reset: %d\n", err); + dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } msleep(msec); gpio_set_value(phy_reset, 1); } #else /* CONFIG_OF */ -static inline int fec_get_phy_mode_dt(struct platform_device *pdev) +static int fec_get_phy_mode_dt(struct platform_device *pdev) { return -ENODEV; } -static inline void fec_reset_phy(struct platform_device *pdev) +static void fec_reset_phy(struct platform_device *pdev) { /* * In case of platform probe, the reset has been done @@ -1570,10 +1753,17 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); + /* default enable pause frame auto negotiation */ + if (pdev->id_entry && + (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) + fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; + fep->hwp = ioremap(r->start, resource_size(r)); fep->pdev = pdev; fep->dev_id = dev_id++; + fep->bufdesc_ex = 0; + if (!fep->hwp) { ret = -ENOMEM; goto failed_ioremap; @@ -1628,19 +1818,19 @@ fec_probe(struct platform_device *pdev) goto failed_clk; } -#ifdef CONFIG_FEC_PTP fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); + fep->bufdesc_ex = + pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; if (IS_ERR(fep->clk_ptp)) { ret = PTR_ERR(fep->clk_ptp); - goto failed_clk; + fep->bufdesc_ex = 0; } -#endif clk_prepare_enable(fep->clk_ahb); clk_prepare_enable(fep->clk_ipg); -#ifdef CONFIG_FEC_PTP - clk_prepare_enable(fep->clk_ptp); -#endif + if (!IS_ERR(fep->clk_ptp)) + clk_prepare_enable(fep->clk_ptp); + reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(reg_phy)) { ret = regulator_enable(reg_phy); @@ -1653,6 +1843,9 @@ fec_probe(struct platform_device *pdev) fec_reset_phy(pdev); + if (fep->bufdesc_ex) + fec_ptp_init(ndev, pdev); + ret = fec_enet_init(ndev); if (ret) goto failed_init; @@ -1668,10 +1861,6 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_register; -#ifdef CONFIG_FEC_PTP - fec_ptp_init(ndev, pdev); -#endif - return 0; failed_register: @@ -1681,9 +1870,8 @@ failed_init: failed_regulator: clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); -#ifdef CONFIG_FEC_PTP - clk_disable_unprepare(fep->clk_ptp); -#endif + if (!IS_ERR(fep->clk_ptp)) + clk_disable_unprepare(fep->clk_ptp); failed_pin: failed_clk: for (i = 0; i < FEC_IRQ_NUM; i++) { @@ -1716,12 +1904,10 @@ fec_drv_remove(struct platform_device *pdev) if (irq > 0) free_irq(irq, ndev); } -#ifdef CONFIG_FEC_PTP del_timer_sync(&fep->time_keep); 
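The probe and cleanup hunks above drop the CONFIG_FEC_PTP conditionals and treat the "ptp" clock as optional at run time: if devm_clk_get() returns an error pointer, extended descriptors are switched off and every later clk_prepare_enable()/clk_disable_unprepare() is guarded with IS_ERR(), since calling the clk API on an error pointer would dereference garbage. Condensed to its shape (handling of the other clocks and error paths omitted):

        fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
        fep->bufdesc_ex = pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
        if (IS_ERR(fep->clk_ptp))
                fep->bufdesc_ex = 0;            /* no PTP clock: plain descriptors only */

        if (!IS_ERR(fep->clk_ptp))
                clk_prepare_enable(fep->clk_ptp);

        if (fep->bufdesc_ex)
                fec_ptp_init(ndev, pdev);       /* PTP support only with extended BDs */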
clk_disable_unprepare(fep->clk_ptp); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); -#endif clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); iounmap(fep->hwp); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index c5a3bc1475c7..01579b8e37c4 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -13,11 +13,9 @@ #define FEC_H /****************************************************************************/ -#ifdef CONFIG_FEC_PTP #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> -#endif #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ @@ -50,6 +48,10 @@ #define FEC_R_DES_START 0x180 /* Receive descriptor ring */ #define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ +#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ +#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ +#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ +#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ @@ -94,14 +96,17 @@ struct bufdesc { unsigned short cbd_datlen; /* Data length */ unsigned short cbd_sc; /* Control and status info */ unsigned long cbd_bufaddr; /* Buffer address */ -#ifdef CONFIG_FEC_PTP +}; + +struct bufdesc_ex { + struct bufdesc desc; unsigned long cbd_esc; unsigned long cbd_prot; unsigned long cbd_bdu; unsigned long ts; unsigned short res0[4]; -#endif }; + #else struct bufdesc { unsigned short cbd_sc; /* Control and status info */ @@ -203,9 +208,7 @@ struct fec_enet_private { struct clk *clk_ipg; struct clk *clk_ahb; -#ifdef CONFIG_FEC_PTP struct clk *clk_ptp; -#endif /* The saved address of a sent-in-place packet/buffer, for skfree(). 
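Because struct bufdesc_ex embeds the basic struct bufdesc as its first member and appends the esc/prot/bdu/timestamp words, the descriptor rings can no longer be walked with a plain bdp++ or bdp--: the stride depends on whether extended descriptors are in use. The fec_enet_get_nextdesc()/fec_enet_get_prevdesc() helpers called throughout the driver hunks are not part of this excerpt; a plausible implementation, assuming the int argument is the fep->bufdesc_ex flag passed at every call site, simply selects the stride:

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
{
        if (is_ex)
                return (struct bufdesc *)(((struct bufdesc_ex *)bdp) + 1);
        return bdp + 1;
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
{
        if (is_ex)
                return (struct bufdesc *)(((struct bufdesc_ex *)bdp) - 1);
        return bdp - 1;
}

The same sizing explains the tx_bd_base computation in fec_enet_init(): with bufdesc_ex set, the transmit ring starts RX_RING_SIZE extended descriptors past cbd_base rather than RX_RING_SIZE basic ones.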
*/ unsigned char *tx_bounce[TX_RING_SIZE]; @@ -243,8 +246,11 @@ struct fec_enet_private { int full_duplex; struct completion mdio_done; int irq[FEC_IRQ_NUM]; + int bufdesc_ex; + int pause_flag; + + struct napi_struct napi; -#ifdef CONFIG_FEC_PTP struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; unsigned long last_overflow_check; @@ -257,15 +263,12 @@ struct fec_enet_private { int hwts_rx_en; int hwts_tx_en; struct timer_list time_keep; -#endif }; -#ifdef CONFIG_FEC_PTP void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); -#endif /****************************************************************************/ #endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 817d081d2cd8..7f91b0c5c264 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -29,6 +29,7 @@ #include <linux/delay.h> #include <linux/of_device.h> #include <linux/of_mdio.h> +#include <linux/of_net.h> #include <linux/of_platform.h> #include <linux/netdevice.h> @@ -76,10 +77,6 @@ static void mpc52xx_fec_stop(struct net_device *dev); static void mpc52xx_fec_start(struct net_device *dev); static void mpc52xx_fec_reset(struct net_device *dev); -static u8 mpc52xx_fec_mac_addr[6]; -module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0); -MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe"); - #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) static int debug = -1; /* the above default */ @@ -110,15 +107,6 @@ static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac) out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE); } -static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac) -{ - struct mpc52xx_fec_priv *priv = netdev_priv(dev); - struct mpc52xx_fec __iomem *fec = priv->fec; - - *(u32 *)(&mac[0]) = in_be32(&fec->paddr1); - *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16; -} - static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sock = addr; @@ -853,6 +841,8 @@ static int mpc52xx_fec_probe(struct platform_device *op) struct resource mem; const u32 *prop; int prop_size; + struct device_node *np = op->dev.of_node; + const char *mac_addr; phys_addr_t rx_fifo; phys_addr_t tx_fifo; @@ -866,7 +856,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) priv->ndev = ndev; /* Reserve FEC control zone */ - rv = of_address_to_resource(op->dev.of_node, 0, &mem); + rv = of_address_to_resource(np, 0, &mem); if (rv) { printk(KERN_ERR DRIVER_NAME ": " "Error while parsing device node resource\n" ); @@ -919,7 +909,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) /* Get the IRQ we need one by one */ /* Control */ - ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0); + ndev->irq = irq_of_parse_and_map(np, 0); /* RX */ priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); @@ -927,11 +917,33 @@ static int mpc52xx_fec_probe(struct platform_device *op) /* TX */ priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk); - /* MAC address init */ - if (!is_zero_ether_addr(mpc52xx_fec_mac_addr)) - memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6); - else - mpc52xx_fec_get_paddr(ndev, ndev->dev_addr); + /* + * MAC address init: + * + * First try to read MAC address from DT + */ + mac_addr = 
of_get_mac_address(np); + if (mac_addr) { + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + } else { + struct mpc52xx_fec __iomem *fec = priv->fec; + + /* + * If the MAC addresse is not provided via DT then read + * it back from the controller regs + */ + *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1); + *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16; + } + + /* + * Check if the MAC address is valid, if not get a random one + */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(&ndev->dev, "using random MAC address %pM\n", + ndev->dev_addr); + } priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); @@ -942,20 +954,20 @@ static int mpc52xx_fec_probe(struct platform_device *op) /* Start with safe defaults for link connection */ priv->speed = 100; priv->duplex = DUPLEX_HALF; - priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1; + priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1; /* The current speed preconfigures the speed of the MII link */ - prop = of_get_property(op->dev.of_node, "current-speed", &prop_size); + prop = of_get_property(np, "current-speed", &prop_size); if (prop && (prop_size >= sizeof(u32) * 2)) { priv->speed = prop[0]; priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF; } /* If there is a phy handle, then get the PHY node */ - priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); /* the 7-wire property means don't use MII mode */ - if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) { + if (of_find_property(np, "fsl,7-wire-mode", NULL)) { priv->seven_wire_mode = 1; dev_info(&ndev->dev, "using 7-wire PHY mode\n"); } @@ -970,6 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op) /* We're done ! 
*/ dev_set_drvdata(&op->dev, ndev); + printk(KERN_INFO "%s: %s MAC %pM\n", + ndev->name, op->dev.of_node->full_name, ndev->dev_addr); return 0; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index c40526c78c20..1f17ca0f2201 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -104,7 +104,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) unsigned long flags; int inc; - inc = 1000000000 / clk_get_rate(fep->clk_ptp); + inc = 1000000000 / fep->cycle_speed; /* grab the ptp lock */ spin_lock_irqsave(&fep->tmreg_lock, flags); @@ -363,6 +363,8 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) fep->ptp_caps.settime = fec_ptp_settime; fep->ptp_caps.enable = fec_ptp_enable; + fep->cycle_speed = clk_get_rate(fep->clk_ptp); + spin_lock_init(&fep->tmreg_lock); fec_ptp_start_cyclecounter(ndev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index e9879c5af7ba..46df28893c10 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -888,8 +888,8 @@ static struct net_device_stats *fs_enet_get_stats(struct net_device *dev) static void fs_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int fs_get_regs_len(struct net_device *dev) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index bffb2edd6858..4b5e8a692481 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -133,8 +133,8 @@ static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); -static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi); +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull, struct napi_struct *napi); void gfar_halt(struct net_device *dev); static void gfar_halt_nodisable(struct net_device *dev); void gfar_start(struct net_device *dev); @@ -231,7 +231,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) dma_addr_t addr; int i, j, k; struct gfar_private *priv = netdev_priv(ndev); - struct device *dev = &priv->ofdev->dev; + struct device *dev = priv->dev; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; @@ -277,14 +277,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) /* Setup the skbuff rings */ for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; - tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * - tx_queue->tx_ring_size, - GFP_KERNEL); - if (!tx_queue->tx_skbuff) { - netif_err(priv, ifup, ndev, - "Could not allocate tx_skbuff\n"); + tx_queue->tx_skbuff = + kmalloc_array(tx_queue->tx_ring_size, + sizeof(*tx_queue->tx_skbuff), + GFP_KERNEL); + if (!tx_queue->tx_skbuff) goto cleanup; - } for (k = 0; k < tx_queue->tx_ring_size; k++) tx_queue->tx_skbuff[k] = NULL; @@ -292,15 +290,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) for (i = 0; i < priv->num_rx_queues; i++) { 
rx_queue = priv->rx_queue[i]; - rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * - rx_queue->rx_ring_size, - GFP_KERNEL); - - if (!rx_queue->rx_skbuff) { - netif_err(priv, ifup, ndev, - "Could not allocate rx_skbuff\n"); + rx_queue->rx_skbuff = + kmalloc_array(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_skbuff), + GFP_KERNEL); + if (!rx_queue->rx_skbuff) goto cleanup; - } for (j = 0; j < rx_queue->rx_ring_size; j++) rx_queue->rx_skbuff[j] = NULL; @@ -349,14 +344,23 @@ static void gfar_init_mac(struct net_device *ndev) /* Configure the coalescing support */ gfar_configure_coalescing(priv, 0xFF, 0xFF); + /* set this when rx hw offload (TOE) functions are being used */ + priv->uses_rxfcb = 0; + if (priv->rx_filer_enable) { rctrl |= RCTRL_FILREN; /* Program the RIR0 reg with the required distribution */ gfar_write(®s->rir0, DEFAULT_RIR0); } - if (ndev->features & NETIF_F_RXCSUM) + /* Restore PROMISC mode */ + if (ndev->flags & IFF_PROMISC) + rctrl |= RCTRL_PROM; + + if (ndev->features & NETIF_F_RXCSUM) { rctrl |= RCTRL_CHECKSUMMING; + priv->uses_rxfcb = 1; + } if (priv->extended_hash) { rctrl |= RCTRL_EXTHASH; @@ -378,11 +382,15 @@ static void gfar_init_mac(struct net_device *ndev) } /* Enable HW time stamping if requested from user space */ - if (priv->hwts_rx_en) + if (priv->hwts_rx_en) { rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; + priv->uses_rxfcb = 1; + } - if (ndev->features & NETIF_F_HW_VLAN_RX) + if (ndev->features & NETIF_F_HW_VLAN_RX) { rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + priv->uses_rxfcb = 1; + } /* Init rctrl based on our settings */ gfar_write(®s->rctrl, rctrl); @@ -501,20 +509,6 @@ void unlock_tx_qs(struct gfar_private *priv) spin_unlock(&priv->tx_queue[i]->txlock); } -static bool gfar_is_vlan_on(struct gfar_private *priv) -{ - return (priv->ndev->features & NETIF_F_HW_VLAN_RX) || - (priv->ndev->features & NETIF_F_HW_VLAN_TX); -} - -/* Returns 1 if incoming frames use an FCB */ -static inline int gfar_uses_fcb(struct gfar_private *priv) -{ - return gfar_is_vlan_on(priv) || - (priv->ndev->features & NETIF_F_RXCSUM) || - (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); -} - static void free_tx_pointers(struct gfar_private *priv) { int i; @@ -540,6 +534,19 @@ static void unmap_group_regs(struct gfar_private *priv) iounmap(priv->gfargrp[i].regs); } +static void free_gfar_dev(struct gfar_private *priv) +{ + int i, j; + + for (i = 0; i < priv->num_grps; i++) + for (j = 0; j < GFAR_NUM_IRQS; j++) { + kfree(priv->gfargrp[i].irqinfo[j]); + priv->gfargrp[i].irqinfo[j] = NULL; + } + + free_netdev(priv->ndev); +} + static void disable_napi(struct gfar_private *priv) { int i; @@ -559,40 +566,46 @@ static void enable_napi(struct gfar_private *priv) static int gfar_parse_group(struct device_node *np, struct gfar_private *priv, const char *model) { + struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; u32 *queue_mask; + int i; + + for (i = 0; i < GFAR_NUM_IRQS; i++) { + grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), + GFP_KERNEL); + if (!grp->irqinfo[i]) + return -ENOMEM; + } - priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); - if (!priv->gfargrp[priv->num_grps].regs) + grp->regs = of_iomap(np, 0); + if (!grp->regs) return -ENOMEM; - priv->gfargrp[priv->num_grps].interruptTransmit = - irq_of_parse_and_map(np, 0); + gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); /* If we aren't the FEC we have multiple interrupts */ if (model && strcasecmp(model, "FEC")) { - priv->gfargrp[priv->num_grps].interruptReceive = - irq_of_parse_and_map(np, 1); - 
priv->gfargrp[priv->num_grps].interruptError = - irq_of_parse_and_map(np,2); - if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || - priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || - priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) + gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); + gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); + if (gfar_irq(grp, TX)->irq == NO_IRQ || + gfar_irq(grp, RX)->irq == NO_IRQ || + gfar_irq(grp, ER)->irq == NO_IRQ) return -EINVAL; } - priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; - priv->gfargrp[priv->num_grps].priv = priv; - spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); + grp->grp_id = priv->num_grps; + grp->priv = priv; + spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); - priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ? + grp->rx_bit_map = queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); - priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ? + grp->tx_bit_map = queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); } else { - priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; - priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; + grp->rx_bit_map = 0xFF; + grp->tx_bit_map = 0xFF; } priv->num_grps++; @@ -645,7 +658,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return -ENOMEM; priv = netdev_priv(dev); - priv->node = ofdev->dev.of_node; priv->ndev = dev; priv->num_tx_queues = num_tx_qs; @@ -777,7 +789,7 @@ tx_alloc_failed: free_tx_pointers(priv); err_grp_init: unmap_group_regs(priv); - free_netdev(dev); + free_gfar_dev(priv); return err; } @@ -983,7 +995,7 @@ static int gfar_probe(struct platform_device *ofdev) priv = netdev_priv(dev); priv->ndev = dev; priv->ofdev = ofdev; - priv->node = ofdev->dev.of_node; + priv->dev = &ofdev->dev; SET_NETDEV_DEV(dev, &ofdev->dev); spin_lock_init(&priv->bflock); @@ -1020,8 +1032,6 @@ static int gfar_probe(struct platform_device *ofdev) /* Set the dev->base_addr to the gfar reg region */ dev->base_addr = (unsigned long) regs; - SET_NETDEV_DEV(dev, &ofdev->dev); - /* Fill in the dev structure */ dev->watchdog_timeo = TX_TIMEOUT; dev->mtu = 1500; @@ -1182,15 +1192,16 @@ static int gfar_probe(struct platform_device *ofdev) /* fill out IRQ number and name fields */ for (i = 0; i < priv->num_grps; i++) { + struct gfar_priv_grp *grp = &priv->gfargrp[i]; if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s", + sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", dev->name, "_g", '0' + i, "_tx"); - sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s", + sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", dev->name, "_g", '0' + i, "_rx"); - sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s", + sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", dev->name, "_g", '0' + i, "_er"); } else - strcpy(priv->gfargrp[i].int_name_tx, dev->name); + strcpy(gfar_irq(grp, TX)->name, dev->name); } /* Initialize the filer table */ @@ -1223,7 +1234,7 @@ register_fail: of_node_put(priv->phy_node); if (priv->tbi_node) of_node_put(priv->tbi_node); - free_netdev(dev); + free_gfar_dev(priv); return err; } @@ -1240,7 +1251,7 @@ static int gfar_remove(struct platform_device *ofdev) unregister_netdev(priv->ndev); unmap_group_regs(priv); - free_netdev(priv->ndev); + free_gfar_dev(priv); return 0; } @@ -1648,9 +1659,9 @@ void gfar_halt(struct net_device *dev) 
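The gianfar hunks around this point replace the per-group interruptTransmit/interruptReceive/interruptError fields and the int_name_* strings with an array of gfar_irqinfo structures reached through the gfar_irq() helper; the definitions only appear in the gianfar.h hunk further down, so they are collected here for reference (copied from that hunk, usage comment illustrative):

struct gfar_irqinfo {
        unsigned int irq;                       /* mapped IRQ number */
        char name[GFAR_INT_NAME_MAX];           /* name handed to request_irq() */
};

enum gfar_irqinfo_id {
        GFAR_TX = 0,
        GFAR_RX = 1,
        GFAR_ER = 2,
        GFAR_NUM_IRQS = 3
};

#define gfar_irq(grp, ID)       ((grp)->irqinfo[GFAR_##ID])

/* typical use, as in register_grp_irqs() below:
 *      request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
 *                  gfar_irq(grp, TX)->name, grp);
 */

Each pointer in gfargrp[i].irqinfo[] is kzalloc'd in gfar_parse_group() and released by the new free_gfar_dev() helper, which is why the error paths above switch from free_netdev() to free_gfar_dev().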
static void free_grp_irqs(struct gfar_priv_grp *grp) { - free_irq(grp->interruptError, grp); - free_irq(grp->interruptTransmit, grp); - free_irq(grp->interruptReceive, grp); + free_irq(gfar_irq(grp, TX)->irq, grp); + free_irq(gfar_irq(grp, RX)->irq, grp); + free_irq(gfar_irq(grp, ER)->irq, grp); } void stop_gfar(struct net_device *dev) @@ -1679,7 +1690,7 @@ void stop_gfar(struct net_device *dev) free_grp_irqs(&priv->gfargrp[i]); } else { for (i = 0; i < priv->num_grps; i++) - free_irq(priv->gfargrp[i].interruptTransmit, + free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, &priv->gfargrp[i]); } @@ -1698,13 +1709,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) if (!tx_queue->tx_skbuff[i]) continue; - dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, + dma_unmap_single(priv->dev, txbdp->bufPtr, txbdp->length, DMA_TO_DEVICE); txbdp->lstatus = 0; for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) { txbdp++; - dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, + dma_unmap_page(priv->dev, txbdp->bufPtr, txbdp->length, DMA_TO_DEVICE); } txbdp++; @@ -1725,8 +1736,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) for (i = 0; i < rx_queue->rx_ring_size; i++) { if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(&priv->ofdev->dev, - rxbdp->bufPtr, priv->rx_buffer_size, + dma_unmap_single(priv->dev, rxbdp->bufPtr, + priv->rx_buffer_size, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_queue->rx_skbuff[i]); rx_queue->rx_skbuff[i] = NULL; @@ -1765,7 +1776,7 @@ static void free_skb_resources(struct gfar_private *priv) free_skb_rx_queue(rx_queue); } - dma_free_coherent(&priv->ofdev->dev, + dma_free_coherent(priv->dev, sizeof(struct txbd8) * priv->total_tx_ring_size + sizeof(struct rxbd8) * priv->total_rx_ring_size, priv->tx_queue[0]->tx_bd_base, @@ -1854,32 +1865,34 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) /* Install our interrupt handlers for Error, * Transmit, and Receive */ - if ((err = request_irq(grp->interruptError, gfar_error, - 0, grp->int_name_er, grp)) < 0) { + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, + gfar_irq(grp, ER)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptError); + gfar_irq(grp, ER)->irq); goto err_irq_fail; } - - if ((err = request_irq(grp->interruptTransmit, gfar_transmit, - 0, grp->int_name_tx, grp)) < 0) { + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptTransmit); + gfar_irq(grp, TX)->irq); goto tx_irq_fail; } - - if ((err = request_irq(grp->interruptReceive, gfar_receive, - 0, grp->int_name_rx, grp)) < 0) { + err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, + gfar_irq(grp, RX)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptReceive); + gfar_irq(grp, RX)->irq); goto rx_irq_fail; } } else { - if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, - 0, grp->int_name_tx, grp)) < 0) { + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptTransmit); + gfar_irq(grp, TX)->irq); goto err_irq_fail; } } @@ -1887,9 +1900,9 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) return 0; rx_irq_fail: - free_irq(grp->interruptTransmit, grp); + free_irq(gfar_irq(grp, TX)->irq, grp); tx_irq_fail: - free_irq(grp->interruptError, grp); + free_irq(gfar_irq(grp, 
ER)->irq, grp); err_irq_fail: return err; @@ -2143,7 +2156,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) if (i == nr_frags - 1) lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - bufaddr = skb_frag_dma_map(&priv->ofdev->dev, + bufaddr = skb_frag_dma_map(priv->dev, &skb_shinfo(skb)->frags[i], 0, length, @@ -2195,7 +2208,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) lstatus |= BD_LFLAG(TXBD_TOE); } - txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, + txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); /* If time stamping is requested one additional TxBD must be set up. The @@ -2308,10 +2321,13 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv) tempval = gfar_read(®s->rctrl); /* If parse is no longer required, then disable parser */ - if (tempval & RCTRL_REQ_PARSER) + if (tempval & RCTRL_REQ_PARSER) { tempval |= RCTRL_PRSDEP_INIT; - else + priv->uses_rxfcb = 1; + } else { tempval &= ~RCTRL_PRSDEP_INIT; + priv->uses_rxfcb = 0; + } gfar_write(®s->rctrl, tempval); } @@ -2344,6 +2360,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) tempval = gfar_read(®s->rctrl); tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); gfar_write(®s->rctrl, tempval); + priv->uses_rxfcb = 1; } else { /* Disable VLAN tag extraction */ tempval = gfar_read(®s->rctrl); @@ -2367,15 +2384,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) int oldsize = priv->rx_buffer_size; int frame_size = new_mtu + ETH_HLEN; - if (gfar_is_vlan_on(priv)) - frame_size += VLAN_HLEN; - if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { netif_err(priv, drv, dev, "Invalid MTU setting\n"); return -EINVAL; } - if (gfar_uses_fcb(priv)) + if (priv->uses_rxfcb) frame_size += GMAC_FCB_LEN; frame_size += priv->padding; @@ -2508,7 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) } else buflen = bdp->length; - dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, + dma_unmap_single(priv->dev, bdp->bufPtr, buflen, DMA_TO_DEVICE); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { @@ -2527,7 +2541,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) bdp = next_txbd(bdp, base, tx_ring_size); for (i = 0; i < frags; i++) { - dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr, + dma_unmap_page(priv->dev, bdp->bufPtr, bdp->length, DMA_TO_DEVICE); bdp->lstatus &= BD_LFLAG(TXBD_WRAP); bdp = next_txbd(bdp, base, tx_ring_size); @@ -2593,7 +2607,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, struct gfar_private *priv = netdev_priv(dev); dma_addr_t buf; - buf = dma_map_single(&priv->ofdev->dev, skb->data, + buf = dma_map_single(priv->dev, skb->data, priv->rx_buffer_size, DMA_FROM_DEVICE); gfar_init_rxbdp(rx_queue, bdp, buf); } @@ -2627,7 +2641,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev) if (status & RXBD_TRUNCATED) { stats->rx_length_errors++; - estats->rx_trunc++; + atomic64_inc(&estats->rx_trunc); return; } @@ -2636,20 +2650,20 @@ static inline void count_errors(unsigned short status, struct net_device *dev) stats->rx_length_errors++; if (status & RXBD_LARGE) - estats->rx_large++; + atomic64_inc(&estats->rx_large); else - estats->rx_short++; + atomic64_inc(&estats->rx_short); } if (status & RXBD_NONOCTET) { stats->rx_frame_errors++; - estats->rx_nonoctet++; + atomic64_inc(&estats->rx_nonoctet); } if (status & RXBD_CRCERR) { - estats->rx_crcerr++; + 
atomic64_inc(&estats->rx_crcerr); stats->rx_crc_errors++; } if (status & RXBD_OVERRUN) { - estats->rx_overrun++; + atomic64_inc(&estats->rx_overrun); stats->rx_crc_errors++; } } @@ -2674,8 +2688,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ -static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi) +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull, struct napi_struct *napi) { struct gfar_private *priv = netdev_priv(dev); struct rxfcb *fcb = NULL; @@ -2722,10 +2736,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Send the packet up the stack */ ret = napi_gro_receive(napi, skb); - if (GRO_DROP == ret) - priv->extra_stats.kernel_dropped++; - - return 0; + if (unlikely(GRO_DROP == ret)) + atomic64_inc(&priv->extra_stats.kernel_dropped); } /* gfar_clean_rx_ring() -- Processes each frame in the rx ring @@ -2746,7 +2758,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) bdp = rx_queue->cur_rx; base = rx_queue->rx_bd_base; - amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); + amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { struct sk_buff *newskb; @@ -2758,7 +2770,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; - dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, + dma_unmap_single(priv->dev, bdp->bufPtr, priv->rx_buffer_size, DMA_FROM_DEVICE); if (unlikely(!(bdp->status & RXBD_ERR) && @@ -2791,7 +2803,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) } else { netif_warn(priv, rx_err, dev, "Missing skb!\n"); rx_queue->stats.rx_dropped++; - priv->extra_stats.rx_skbmissing++; + atomic64_inc(&priv->extra_stats.rx_skbmissing); } } @@ -3224,7 +3236,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id) netif_dbg(priv, tx_err, dev, "TX FIFO underrun, packet dropped\n"); dev->stats.tx_dropped++; - priv->extra_stats.tx_underrun++; + atomic64_inc(&priv->extra_stats.tx_underrun); local_irq_save(flags); lock_tx_qs(priv); @@ -3239,7 +3251,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id) } if (events & IEVENT_BSY) { dev->stats.rx_errors++; - priv->extra_stats.rx_bsy++; + atomic64_inc(&priv->extra_stats.rx_bsy); gfar_receive(irq, grp_id); @@ -3248,19 +3260,19 @@ static irqreturn_t gfar_error(int irq, void *grp_id) } if (events & IEVENT_BABR) { dev->stats.rx_errors++; - priv->extra_stats.rx_babr++; + atomic64_inc(&priv->extra_stats.rx_babr); netif_dbg(priv, rx_err, dev, "babbling RX error\n"); } if (events & IEVENT_EBERR) { - priv->extra_stats.eberr++; + atomic64_inc(&priv->extra_stats.eberr); netif_dbg(priv, rx_err, dev, "bus error\n"); } if (events & IEVENT_RXC) netif_dbg(priv, rx_status, dev, "control frame\n"); if (events & IEVENT_BABT) { - priv->extra_stats.tx_babt++; + atomic64_inc(&priv->extra_stats.tx_babt); netif_dbg(priv, tx_err, dev, "babbling TX error\n"); } return IRQ_HANDLED; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 22eabc13ca99..63a28d294e20 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -627,36 +627,29 @@ struct rmon_mib }; struct gfar_extra_stats { - u64 kernel_dropped; - u64 rx_large; - u64 rx_short; - u64 
rx_nonoctet; - u64 rx_crcerr; - u64 rx_overrun; - u64 rx_bsy; - u64 rx_babr; - u64 rx_trunc; - u64 eberr; - u64 tx_babt; - u64 tx_underrun; - u64 rx_skbmissing; - u64 tx_timeout; + atomic64_t kernel_dropped; + atomic64_t rx_large; + atomic64_t rx_short; + atomic64_t rx_nonoctet; + atomic64_t rx_crcerr; + atomic64_t rx_overrun; + atomic64_t rx_bsy; + atomic64_t rx_babr; + atomic64_t rx_trunc; + atomic64_t eberr; + atomic64_t tx_babt; + atomic64_t tx_underrun; + atomic64_t rx_skbmissing; + atomic64_t tx_timeout; }; #define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32)) -#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64)) +#define GFAR_EXTRA_STATS_LEN \ + (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t)) -/* Number of stats in the stats structure (ignore car and cam regs)*/ +/* Number of stats exported via ethtool */ #define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN) -#define GFAR_INFOSTR_LEN 32 - -struct gfar_stats { - u64 extra[GFAR_EXTRA_STATS_LEN]; - u64 rmon[GFAR_RMON_LEN]; -}; - - struct gfar { u32 tsec_id; /* 0x.000 - Controller ID register */ u32 tsec_id2; /* 0x.004 - Controller ID2 register */ @@ -937,26 +930,25 @@ struct tx_q_stats { * @txtime: coalescing value if based on time */ struct gfar_priv_tx_q { + /* cacheline 1 */ spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES))); - struct sk_buff ** tx_skbuff; - /* Buffer descriptor pointers */ - dma_addr_t tx_bd_dma_base; struct txbd8 *tx_bd_base; struct txbd8 *cur_tx; - struct txbd8 *dirty_tx; + unsigned int num_txbdfree; + unsigned short skb_curtx; + unsigned short tx_ring_size; struct tx_q_stats stats; - struct net_device *dev; struct gfar_priv_grp *grp; - u16 skb_curtx; - u16 skb_dirtytx; - u16 qindex; - unsigned int tx_ring_size; - unsigned int num_txbdfree; + /* cacheline 2 */ + struct net_device *dev; + struct sk_buff **tx_skbuff; + struct txbd8 *dirty_tx; + unsigned short skb_dirtytx; + unsigned short qindex; /* Configuration info for the coalescing features */ - unsigned char txcoalescing; + unsigned int txcoalescing; unsigned long txic; - unsigned short txcount; - unsigned short txtime; + dma_addr_t tx_bd_dma_base; }; /* @@ -999,18 +991,25 @@ struct gfar_priv_rx_q { unsigned long rxic; }; +enum gfar_irqinfo_id { + GFAR_TX = 0, + GFAR_RX = 1, + GFAR_ER = 2, + GFAR_NUM_IRQS = 3 +}; + +struct gfar_irqinfo { + unsigned int irq; + char name[GFAR_INT_NAME_MAX]; +}; + /** * struct gfar_priv_grp - per group structure * @napi: the napi poll function * @priv: back pointer to the priv structure * @regs: the ioremapped register space for this group * @grp_id: group id for this group - * @interruptTransmit: The TX interrupt number for this group - * @interruptReceive: The RX interrupt number for this group - * @interruptError: The ERROR interrupt number for this group - * @int_name_tx: tx interrupt name for this group - * @int_name_rx: rx interrupt name for this group - * @int_name_er: er interrupt name for this group + * @irqinfo: TX/RX/ER irq data for this group */ struct gfar_priv_grp { @@ -1019,23 +1018,20 @@ struct gfar_priv_grp { struct gfar_private *priv; struct gfar __iomem *regs; unsigned int grp_id; - unsigned long rx_bit_map; - unsigned long tx_bit_map; - unsigned long num_tx_queues; unsigned long num_rx_queues; + unsigned long rx_bit_map; + /* cacheline 3 */ unsigned int rstat; unsigned int tstat; - unsigned int imask; - unsigned int ievent; - unsigned int interruptTransmit; - unsigned int interruptReceive; - unsigned int interruptError; - - char 
int_name_tx[GFAR_INT_NAME_MAX]; - char int_name_rx[GFAR_INT_NAME_MAX]; - char int_name_er[GFAR_INT_NAME_MAX]; + unsigned long num_tx_queues; + unsigned long tx_bit_map; + + struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS]; }; +#define gfar_irq(grp, ID) \ + ((grp)->irqinfo[GFAR_##ID]) + enum gfar_errata { GFAR_ERRATA_74 = 0x01, GFAR_ERRATA_76 = 0x02, @@ -1053,28 +1049,65 @@ enum gfar_errata { * the buffer descriptor determines the actual condition. */ struct gfar_private { - - /* Indicates how many tx, rx queues are enabled */ - unsigned int num_tx_queues; unsigned int num_rx_queues; - unsigned int num_grps; - unsigned int mode; - /* The total tx and rx ring size for the enabled queues */ - unsigned int total_tx_ring_size; - unsigned int total_rx_ring_size; - - struct device_node *node; + struct device *dev; struct net_device *ndev; - struct platform_device *ofdev; enum gfar_errata errata; + unsigned int rx_buffer_size; + + u16 uses_rxfcb; + u16 padding; + + /* HW time stamping enabled flag */ + int hwts_rx_en; + int hwts_tx_en; - struct gfar_priv_grp gfargrp[MAXGROUPS]; struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; + struct gfar_priv_grp gfargrp[MAXGROUPS]; + + u32 device_flags; + + unsigned int mode; + unsigned int num_tx_queues; + unsigned int num_grps; + + /* Network Statistics */ + struct gfar_extra_stats extra_stats; + + /* PHY stuff */ + phy_interface_t interface; + struct device_node *phy_node; + struct device_node *tbi_node; + struct phy_device *phydev; + struct mii_bus *mii_bus; + int oldspeed; + int oldduplex; + int oldlink; + + /* Bitfield update lock */ + spinlock_t bflock; + + uint32_t msg_enable; + + struct work_struct reset_task; + + struct platform_device *ofdev; + unsigned char + extended_hash:1, + bd_stash_en:1, + rx_filer_enable:1, + /* Wake-on-LAN enabled */ + wol_en:1, + /* Enable priorty based Tx scheduling in Hw */ + prio_sched_en:1; + + /* The total tx and rx ring size for the enabled queues */ + unsigned int total_tx_ring_size; + unsigned int total_rx_ring_size; /* RX per device parameters */ - unsigned int rx_buffer_size; unsigned int rx_stash_size; unsigned int rx_stash_index; @@ -1093,39 +1126,6 @@ struct gfar_private { unsigned int fifo_starve; unsigned int fifo_starve_off; - /* Bitfield update lock */ - spinlock_t bflock; - - phy_interface_t interface; - struct device_node *phy_node; - struct device_node *tbi_node; - u32 device_flags; - unsigned char - extended_hash:1, - bd_stash_en:1, - rx_filer_enable:1, - wol_en:1, /* Wake-on-LAN enabled */ - prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */ - unsigned short padding; - - /* PHY stuff */ - struct phy_device *phydev; - struct mii_bus *mii_bus; - int oldspeed; - int oldduplex; - int oldlink; - - uint32_t msg_enable; - - struct work_struct reset_task; - - /* Network Statistics */ - struct gfar_extra_stats extra_stats; - - /* HW time stamping enabled flag */ - int hwts_rx_en; - int hwts_tx_en; - /*Filer table*/ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; @@ -1138,16 +1138,16 @@ static inline int gfar_has_errata(struct gfar_private *priv, return priv->errata & err; } -static inline u32 gfar_read(volatile unsigned __iomem *addr) +static inline u32 gfar_read(unsigned __iomem *addr) { u32 val; - val = in_be32(addr); + val = ioread32be(addr); return val; } -static inline void gfar_write(volatile unsigned __iomem *addr, u32 val) +static inline void gfar_write(unsigned __iomem *addr, u32 val) { - out_be32(addr, val); + 
iowrite32be(val, addr); } static inline void gfar_write_filer(struct gfar_private *priv, diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index ab6762caa957..75e89acf4912 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -149,20 +149,17 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, int i; struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; - u64 *extra = (u64 *) & priv->extra_stats; + atomic64_t *extra = (atomic64_t *)&priv->extra_stats; + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) + buf[i] = atomic64_read(&extra[i]); if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { u32 __iomem *rmon = (u32 __iomem *) ®s->rmon; - struct gfar_stats *stats = (struct gfar_stats *) buf; - - for (i = 0; i < GFAR_RMON_LEN; i++) - stats->rmon[i] = (u64) gfar_read(&rmon[i]); - for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) - stats->extra[i] = extra[i]; - } else - for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) - buf[i] = extra[i]; + for (; i < GFAR_STATS_LEN; i++, rmon++) + buf[i] = (u64) gfar_read(rmon); + } } static int gfar_sset_count(struct net_device *dev, int sset) @@ -184,10 +181,11 @@ static int gfar_sset_count(struct net_device *dev, int sset) static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN); - strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); - strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); - strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, gfar_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } @@ -715,12 +713,11 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, int j = MAX_FILER_IDX, l = 0x0; int ret = 1; - local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), - GFP_KERNEL); - local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), - GFP_KERNEL); + local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int), + GFP_KERNEL); + local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int), + GFP_KERNEL); if (!local_rqfpr || !local_rqfcr) { - pr_err("Out of memory\n"); ret = 0; goto err; } diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 37b035306013..1ebf7128ec04 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -350,10 +350,10 @@ static void uec_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, DRV_NAME, 32); - strncpy(drvinfo->version, DRV_VERSION, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, "QUICC ENGINE", 32); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info)); drvinfo->eedump_len = 0; drvinfo->regdump_len = uec_get_regs_len(netdev); } diff --git a/drivers/net/ethernet/fujitsu/Kconfig 
b/drivers/net/ethernet/fujitsu/Kconfig index dffee9d44fd5..c6a87625898a 100644 --- a/drivers/net/ethernet/fujitsu/Kconfig +++ b/drivers/net/ethernet/fujitsu/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_FUJITSU bool "Fujitsu devices" default y - depends on ISA || PCMCIA || ((ISA || MCA_LEGACY) && EXPERIMENTAL) + depends on ISA || PCMCIA || (ISA && EXPERIMENTAL) ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -17,18 +17,6 @@ config NET_VENDOR_FUJITSU if NET_VENDOR_FUJITSU -config AT1700 - tristate "AT1700/1720 support (EXPERIMENTAL)" - depends on (ISA || MCA_LEGACY) && EXPERIMENTAL - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called at1700. - config PCMCIA_FMVJ18X tristate "Fujitsu FMV-J18x PCMCIA support" depends on PCMCIA @@ -40,15 +28,4 @@ config PCMCIA_FMVJ18X To compile this driver as a module, choose M here: the module will be called fmvj18x_cs. If unsure, say N. -config ETH16I - tristate "ICL EtherTeam 16i/32 support" - depends on ISA - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called eth16i. - endif # NET_VENDOR_FUJITSU diff --git a/drivers/net/ethernet/fujitsu/Makefile b/drivers/net/ethernet/fujitsu/Makefile index 2730ae67d3aa..21561fdcc69f 100644 --- a/drivers/net/ethernet/fujitsu/Makefile +++ b/drivers/net/ethernet/fujitsu/Makefile @@ -2,6 +2,4 @@ # Makefile for the Fujitsu network device drivers. # -obj-$(CONFIG_AT1700) += at1700.o -obj-$(CONFIG_ETH16I) += eth16i.o obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c deleted file mode 100644 index 4b80dc4531ad..000000000000 --- a/drivers/net/ethernet/fujitsu/at1700.c +++ /dev/null @@ -1,791 +0,0 @@ -/* at1700.c: A network device driver for the Allied Telesis AT1700. - - Written 1993-98 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This is a device driver for the Allied Telesis AT1700, and - Fujitsu FMV-181/182/181A/182A/183/184/183A/184A, which are - straight-forward Fujitsu MB86965 implementations. - - Modification for Fujitsu FMV-18X cards is done by Yutaka Tamiya - (tamy@flab.fujitsu.co.jp). - - Sources: - The Fujitsu MB86965 datasheet. - - After the initial version of this driver was written Gerry Sawkins of - ATI provided their EEPROM configuration code header file. - Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes. - - MCA bus (AT1720) support (now deleted) by Rene Schmit <rene@bss.lu> - - Bugs: - The MB86965 has a design flaw that makes all probes unreliable. Not - only is it difficult to detect, it also moves around in I/O space in - response to inb()s from other device probes! 
-*/ - -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/skbuff.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/crc32.h> -#include <linux/bitops.h> - -#include <asm/io.h> -#include <asm/dma.h> - -static char version[] __initdata = - "at1700.c:v1.16 9/11/06 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#define DRV_NAME "at1700" - -/* Tunable parameters. */ - -/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */ -#define MC_FILTERBREAK 64 - -/* These unusual address orders are used to verify the CONFIG register. */ - -static int fmv18x_probe_list[] __initdata = { - 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0 -}; - -/* - * ISA - */ - -static unsigned at1700_probe_list[] __initdata = { - 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 -}; - -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 1 -#endif -static unsigned int net_debug = NET_DEBUG; - -typedef unsigned char uchar; - -/* Information that need to be kept for each board. */ -struct net_local { - spinlock_t lock; - unsigned char mc_filter[8]; - uint jumpered:1; /* Set iff the board has jumper config. */ - uint tx_started:1; /* Packets are on the Tx queue. */ - uint tx_queue_ready:1; /* Tx queue is ready to be sent. */ - uint rx_started:1; /* Packets are Rxing. */ - uchar tx_queue; /* Number of packet on the Tx queue. */ - ushort tx_queue_len; /* Current length of the Tx queue. */ -}; - - -/* Offsets from the base address. */ -#define STATUS 0 -#define TX_STATUS 0 -#define RX_STATUS 1 -#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */ -#define RX_INTR 3 -#define TX_MODE 4 -#define RX_MODE 5 -#define CONFIG_0 6 /* Misc. configuration settings. */ -#define CONFIG_1 7 -/* Run-time register bank 2 definitions. */ -#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */ -#define TX_START 10 -#define COL16CNTL 11 /* Control Reg for 16 collisions */ -#define MODE13 13 -#define RX_CTRL 14 -/* Configuration registers only on the '865A/B chips. */ -#define EEPROM_Ctrl 16 -#define EEPROM_Data 17 -#define CARDSTATUS 16 /* FMV-18x Card Status */ -#define CARDSTATUS1 17 /* FMV-18x Card Status */ -#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */ -#define IOCONFIG1 19 -#define SAPROM 20 /* The station address PROM, if no EEPROM. */ -#define MODE24 24 -#define RESET 31 /* Write to reset some parts of the chip. */ -#define AT1700_IO_EXTENT 32 -#define PORT_OFFSET(o) (o) - - -#define TX_TIMEOUT (HZ/10) - - -/* Index to functions, as function prototypes. */ - -static int at1700_probe1(struct net_device *dev, int ioaddr); -static int read_eeprom(long ioaddr, int location); -static int net_open(struct net_device *dev); -static netdev_tx_t net_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t net_interrupt(int irq, void *dev_id); -static void net_rx(struct net_device *dev); -static int net_close(struct net_device *dev); -static void set_rx_mode(struct net_device *dev); -static void net_tx_timeout (struct net_device *dev); - - -/* Check for a network adaptor of this type, and return '0' iff one exists. - If dev->base_addr == 0, probe all likely locations. - If dev->base_addr == 1, always return failure. 
- If dev->base_addr == 2, allocate space for the device and return success - (detachable devices only). - */ - -static int io = 0x260; - -static int irq; - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, NULL); - release_region(dev->base_addr, AT1700_IO_EXTENT); -} - -struct net_device * __init at1700_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - unsigned *port; - int err = 0; - - if (!dev) - return ERR_PTR(-ENODEV); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - } else { - dev->base_addr = io; - dev->irq = irq; - } - - if (io > 0x1ff) { /* Check a single specified location. */ - err = at1700_probe1(dev, io); - } else if (io != 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { - for (port = at1700_probe_list; *port; port++) { - if (at1700_probe1(dev, *port) == 0) - break; - dev->irq = irq; - } - if (!*port) - err = -ENODEV; - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - cleanup_card(dev); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops at1700_netdev_ops = { - .ndo_open = net_open, - .ndo_stop = net_close, - .ndo_start_xmit = net_send_packet, - .ndo_set_rx_mode = set_rx_mode, - .ndo_tx_timeout = net_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* The Fujitsu datasheet suggests that the NIC be probed for by checking its - "signature", the default bit pattern after a reset. This *doesn't* work -- - there is no way to reset the bus interface without a complete power-cycle! - - It turns out that ATI came to the same conclusion I did: the only thing - that can be done is checking a few bits and then diving right into an - EEPROM read. */ - -static int __init at1700_probe1(struct net_device *dev, int ioaddr) -{ - static const char fmv_irqmap[4] = {3, 7, 10, 15}; - static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15}; - static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15}; - unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0; - int ret = -ENODEV; - struct net_local *lp = netdev_priv(dev); - - if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* Resetting the chip doesn't reset the ISA interface, so don't bother. - That means we have to be careful with the register values we probe - for. - */ -#ifdef notdef - printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n", - ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5), - read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl)); -#endif - /* We must check for the EEPROM-config boards first, else accessing - IOCONFIG0 will move the board! */ - if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr && - read_eeprom(ioaddr, 4) == 0x0000 && - (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400) - is_at1700 = 1; - else if (inb(ioaddr + SAPROM ) == 0x00 && - inb(ioaddr + SAPROM + 1) == 0x00 && - inb(ioaddr + SAPROM + 2) == 0x0e) - is_fmv18x = 1; - else { - goto err_out; - } - - /* Reset the internal state machines. */ - outb(0, ioaddr + RESET); - - if (is_at1700) { - irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04) - | (read_eeprom(ioaddr, 0)>>14)]; - } else { - /* Check PnP mode for FMV-183/184/183A/184A. */ - /* This PnP routine is very poor. IO and IRQ should be known. 
*/ - if (inb(ioaddr + CARDSTATUS1) & 0x20) { - irq = dev->irq; - for (i = 0; i < 8; i++) { - if (irq == fmv_irqmap_pnp[i]) - break; - } - if (i == 8) { - goto err_out; - } - } else { - if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr) - goto err_out; - irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03]; - } - } - - printk("%s: %s found at %#3x, IRQ %d, address ", dev->name, - is_at1700 ? "AT1700" : "FMV-18X", ioaddr, irq); - - dev->base_addr = ioaddr; - dev->irq = irq; - - if (is_at1700) { - for(i = 0; i < 3; i++) { - unsigned short eeprom_val = read_eeprom(ioaddr, 4+i); - ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val); - } - } else { - for(i = 0; i < 6; i++) { - unsigned char val = inb(ioaddr + SAPROM + i); - dev->dev_addr[i] = val; - } - } - printk("%pM", dev->dev_addr); - - /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals, - rather than 150 ohm shielded twisted pair compensation. - 0x0000 == auto-sense the interface - 0x0800 == use TP interface - 0x1800 == use coax interface - */ - { - const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"}; - if (is_at1700) { - ushort setup_value = read_eeprom(ioaddr, 12); - dev->if_port = setup_value >> 8; - } else { - ushort setup_value = inb(ioaddr + CARDSTATUS); - switch (setup_value & 0x07) { - case 0x01: /* 10base5 */ - case 0x02: /* 10base2 */ - dev->if_port = 0x18; break; - case 0x04: /* 10baseT */ - dev->if_port = 0x08; break; - default: /* auto-sense */ - dev->if_port = 0x00; break; - } - } - printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]); - } - - /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit - bus access, two 4K Tx queues, and disabled Tx and Rx. */ - outb(0xda, ioaddr + CONFIG_0); - - /* Set the station address in bank zero. */ - outb(0x00, ioaddr + CONFIG_1); - for (i = 0; i < 6; i++) - outb(dev->dev_addr[i], ioaddr + PORT_OFFSET(8 + i)); - - /* Switch to bank 1 and set the multicast table to accept none. */ - outb(0x04, ioaddr + CONFIG_1); - for (i = 0; i < 8; i++) - outb(0x00, ioaddr + PORT_OFFSET(8 + i)); - - - /* Switch to bank 2 */ - /* Lock our I/O address, and set manual processing mode for 16 collisions. */ - outb(0x08, ioaddr + CONFIG_1); - outb(dev->if_port, ioaddr + MODE13); - outb(0x00, ioaddr + COL16CNTL); - - if (net_debug) - printk(version); - - dev->netdev_ops = &at1700_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - spin_lock_init(&lp->lock); - - lp->jumpered = is_fmv18x; - /* Snarf the interrupt vector now. */ - ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev); - if (ret) { - printk(KERN_ERR "AT1700 at %#3x is unusable due to a " - "conflict on IRQ %d.\n", - ioaddr, irq); - goto err_out; - } - - return 0; - -err_out: - release_region(ioaddr, AT1700_IO_EXTENT); - return ret; -} - - -/* EEPROM_Ctrl bits. */ -#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */ -#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */ -#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */ -#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */ - -/* The EEPROM commands include the alway-set leading bit. */ -#define EE_WRITE_CMD (5 << 6) -#define EE_READ_CMD (6 << 6) -#define EE_ERASE_CMD (7 << 6) - -static int __init read_eeprom(long ioaddr, int location) -{ - int i; - unsigned short retval = 0; - long ee_addr = ioaddr + EEPROM_Ctrl; - long ee_daddr = ioaddr + EEPROM_Data; - int read_cmd = location | EE_READ_CMD; - - /* Shift the read command bits out. 
*/ - for (i = 9; i >= 0; i--) { - short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; - outb(EE_CS, ee_addr); - outb(dataval, ee_daddr); - outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */ - } - outb(EE_DATA_WRITE, ee_daddr); - for (i = 16; i > 0; i--) { - outb(EE_CS, ee_addr); - outb(EE_CS | EE_SHIFT_CLK, ee_addr); - retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0); - } - - /* Terminate the EEPROM access. */ - outb(EE_CS, ee_addr); - outb(EE_SHIFT_CLK, ee_addr); - outb(0, ee_addr); - return retval; -} - - - -static int net_open(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit - bus access, and two 4K Tx queues. */ - outb(0x5a, ioaddr + CONFIG_0); - - /* Powerup, switch to register bank 2, and enable the Rx and Tx. */ - outb(0xe8, ioaddr + CONFIG_1); - - lp->tx_started = 0; - lp->tx_queue_ready = 1; - lp->rx_started = 0; - lp->tx_queue = 0; - lp->tx_queue_len = 0; - - /* Turn on hardware Tx and Rx interrupts. */ - outb(0x82, ioaddr + TX_INTR); - outb(0x81, ioaddr + RX_INTR); - - /* Enable the IRQ on boards of fmv18x it is feasible. */ - if (lp->jumpered) { - outb(0x80, ioaddr + IOCONFIG1); - } - - netif_start_queue(dev); - return 0; -} - -static void net_tx_timeout (struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - printk ("%s: transmit timed out with status %04x, %s?\n", dev->name, - inw (ioaddr + STATUS), inb (ioaddr + TX_STATUS) & 0x80 - ? "IRQ conflict" : "network cable problem"); - printk ("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n", - dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE), - inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START), - inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL)); - dev->stats.tx_errors++; - /* ToDo: We should try to restart the adaptor... */ - outw(0xffff, ioaddr + MODE24); - outw (0xffff, ioaddr + TX_STATUS); - outb (0x5a, ioaddr + CONFIG_0); - outb (0xe8, ioaddr + CONFIG_1); - outw (0x8182, ioaddr + TX_INTR); - outb (0x00, ioaddr + TX_START); - outb (0x03, ioaddr + COL16CNTL); - - dev->trans_start = jiffies; /* prevent tx timeout */ - - lp->tx_started = 0; - lp->tx_queue_ready = 1; - lp->rx_started = 0; - lp->tx_queue = 0; - lp->tx_queue_len = 0; - - netif_wake_queue(dev); -} - - -static netdev_tx_t net_send_packet (struct sk_buff *skb, - struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - short len = skb->len; - unsigned char *buf = skb->data; - static u8 pad[ETH_ZLEN]; - - netif_stop_queue (dev); - - /* We may not start transmitting unless we finish transferring - a packet into the Tx queue. During executing the following - codes we possibly catch a Tx interrupt. Thus we flag off - tx_queue_ready, so that we prevent the interrupt routine - (net_interrupt) to start transmitting. 
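
The bookkeeping that this flag protects is simple: every buffered frame costs its (padded) length plus a two-byte length word, and the queue is reopened only while the 4 KB Tx FIFO still has room for roughly one more maximum-size frame. A standalone sketch of that arithmetic (the constants mirror the ones used below; the sample lengths are arbitrary):

#include <stdio.h>

#define ETH_ZLEN        60      /* minimum Ethernet frame length */
#define TX_FIFO_BYTES   4096    /* one 4K Tx queue, as configured above */

/* cost of one buffered frame: padded payload plus the 2-byte length word */
static int queued_cost(int skb_len)
{
        int length = skb_len < ETH_ZLEN ? ETH_ZLEN : skb_len;

        return length + 2;
}

int main(void)
{
        int queue_len = 0;

        queue_len += queued_cost(42);   /* short frame, padded to 60 bytes */
        queue_len += queued_cost(1500); /* full-sized frame */
        printf("queued %d bytes, room for another: %s\n", queue_len,
               queue_len < TX_FIFO_BYTES - 1502 ? "yes" : "no"); /* same headroom test as below */
        return 0;
}
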
*/ - lp->tx_queue_ready = 0; - { - outw (length, ioaddr + DATAPORT); - /* Packet data */ - outsw (ioaddr + DATAPORT, buf, len >> 1); - /* Check for dribble byte */ - if (len & 1) { - outw(skb->data[skb->len-1], ioaddr + DATAPORT); - len++; - } - /* Check for packet padding */ - if (length != skb->len) - outsw(ioaddr + DATAPORT, pad, (length - len + 1) >> 1); - - lp->tx_queue++; - lp->tx_queue_len += length + 2; - } - lp->tx_queue_ready = 1; - - if (lp->tx_started == 0) { - /* If the Tx is idle, always trigger a transmit. */ - outb (0x80 | lp->tx_queue, ioaddr + TX_START); - lp->tx_queue = 0; - lp->tx_queue_len = 0; - lp->tx_started = 1; - netif_start_queue (dev); - } else if (lp->tx_queue_len < 4096 - 1502) - /* Yes, there is room for one more packet. */ - netif_start_queue (dev); - dev_kfree_skb (skb); - - return NETDEV_TX_OK; -} - -/* The typical workload of the driver: - Handle the network interface interrupts. */ -static irqreturn_t net_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr, status; - int handled = 0; - - if (dev == NULL) { - printk ("at1700_interrupt(): irq %d for unknown device.\n", irq); - return IRQ_NONE; - } - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - spin_lock (&lp->lock); - - status = inw(ioaddr + TX_STATUS); - outw(status, ioaddr + TX_STATUS); - - if (net_debug > 4) - printk("%s: Interrupt with status %04x.\n", dev->name, status); - if (lp->rx_started == 0 && - (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) { - /* Got a packet(s). - We cannot execute net_rx more than once at the same time for - the same device. During executing net_rx, we possibly catch a - Tx interrupt. Thus we flag on rx_started, so that we prevent - the interrupt routine (net_interrupt) to dive into net_rx - again. */ - handled = 1; - lp->rx_started = 1; - outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */ - net_rx(dev); - outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */ - lp->rx_started = 0; - } - if (status & 0x00ff) { - handled = 1; - if (status & 0x02) { - /* More than 16 collisions occurred */ - if (net_debug > 4) - printk("%s: 16 Collision occur during Txing.\n", dev->name); - /* Cancel sending a packet. */ - outb(0x03, ioaddr + COL16CNTL); - dev->stats.collisions++; - } - if (status & 0x82) { - dev->stats.tx_packets++; - /* The Tx queue has any packets and is not being - transferred a packet from the host, start - transmitting. */ - if (lp->tx_queue && lp->tx_queue_ready) { - outb(0x80 | lp->tx_queue, ioaddr + TX_START); - lp->tx_queue = 0; - lp->tx_queue_len = 0; - dev->trans_start = jiffies; - netif_wake_queue (dev); - } else { - lp->tx_started = 0; - netif_wake_queue (dev); - } - } - } - - spin_unlock (&lp->lock); - return IRQ_RETVAL(handled); -} - -/* We have a good packet(s), get it/them out of the buffers. */ -static void -net_rx(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int boguscount = 5; - - while ((inb(ioaddr + RX_MODE) & 0x40) == 0) { - ushort status = inw(ioaddr + DATAPORT); - ushort pkt_len = inw(ioaddr + DATAPORT); - - if (net_debug > 4) - printk("%s: Rxing packet mode %02x status %04x.\n", - dev->name, inb(ioaddr + RX_MODE), status); -#ifndef final_version - if (status == 0) { - outb(0x05, ioaddr + RX_CTRL); - break; - } -#endif - - if ((status & 0xF0) != 0x20) { /* There was an error. 
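
One detail of the interrupt handler above that is easy to miss: it writes the status value it just read straight back to TX_STATUS, which reads like the usual write-one-to-clear idiom, so only the events the handler actually observed are acknowledged. A hardware-free sketch of that pattern (the register is simulated here, and the write-one-to-clear behaviour is an assumption inferred from the code rather than from a datasheet):

#include <stdio.h>

static unsigned short reg = 0x0082;     /* pretend "Tx done" bits are pending */

static unsigned short ack_status(void)
{
        unsigned short status = reg;    /* inw(ioaddr + TX_STATUS)             */
        reg &= ~status;                 /* outw(status, ...) clears what we saw */
        return status;
}

int main(void)
{
        unsigned short status = ack_status();

        printf("handled %#06x, still pending %#06x\n", status, reg);
        return 0;
}
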
*/ - dev->stats.rx_errors++; - if (status & 0x08) dev->stats.rx_length_errors++; - if (status & 0x04) dev->stats.rx_frame_errors++; - if (status & 0x02) dev->stats.rx_crc_errors++; - if (status & 0x01) dev->stats.rx_over_errors++; - } else { - /* Malloc up new buffer. */ - struct sk_buff *skb; - - if (pkt_len > 1550) { - printk("%s: The AT1700 claimed a very large packet, size %d.\n", - dev->name, pkt_len); - /* Prime the FIFO and then flush the packet. */ - inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); - outb(0x05, ioaddr + RX_CTRL); - dev->stats.rx_errors++; - break; - } - skb = netdev_alloc_skb(dev, pkt_len + 3); - if (skb == NULL) { - printk("%s: Memory squeeze, dropping packet (len %d).\n", - dev->name, pkt_len); - /* Prime the FIFO and then flush the packet. */ - inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); - outb(0x05, ioaddr + RX_CTRL); - dev->stats.rx_dropped++; - break; - } - skb_reserve(skb,2); - - insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); - skb->protocol=eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - if (--boguscount <= 0) - break; - } - - /* If any worth-while packets have been received, dev_rint() - has done a mark_bh(NET_BH) for us and will work on them - when we get to the bottom-half routine. */ - { - int i; - for (i = 0; i < 20; i++) { - if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40) - break; - inw(ioaddr + DATAPORT); /* dummy status read */ - outb(0x05, ioaddr + RX_CTRL); - } - - if (net_debug > 5) - printk("%s: Exint Rx packet with mode %02x after %d ticks.\n", - dev->name, inb(ioaddr + RX_MODE), i); - } -} - -/* The inverse routine to net_open(). */ -static int net_close(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - netif_stop_queue(dev); - - /* Set configuration register 0 to disable Tx and Rx. */ - outb(0xda, ioaddr + CONFIG_0); - - /* No statistic counters on the chip to update. */ - - /* Disable the IRQ on boards of fmv18x where it is feasible. */ - if (lp->jumpered) - outb(0x00, ioaddr + IOCONFIG1); - - /* Power-down the chip. Green, green, green! */ - outb(0x00, ioaddr + CONFIG_1); - return 0; -} - -/* - Set the multicast/promiscuous mode for this adaptor. -*/ - -static void -set_rx_mode(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - unsigned char mc_filter[8]; /* Multicast hash filter */ - unsigned long flags; - - if (dev->flags & IFF_PROMISC) { - memset(mc_filter, 0xff, sizeof(mc_filter)); - outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ - } else if (netdev_mc_count(dev) > MC_FILTERBREAK || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to filter perfectly -- accept all multicasts. */ - memset(mc_filter, 0xff, sizeof(mc_filter)); - outb(2, ioaddr + RX_MODE); /* Use normal mode. */ - } else if (netdev_mc_empty(dev)) { - memset(mc_filter, 0x00, sizeof(mc_filter)); - outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ - } else { - struct netdev_hw_addr *ha; - - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, dev) { - unsigned int bit = - ether_crc_le(ETH_ALEN, ha->addr) >> 26; - mc_filter[bit >> 3] |= (1 << bit); - } - outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */ - } - - spin_lock_irqsave (&lp->lock, flags); - if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) { - int i; - int saved_bank = inw(ioaddr + CONFIG_0); - /* Switch to bank 1 and set the multicast table. 
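
The hash above is plain arithmetic: ether_crc_le() boils down to the reflected CRC-32 (polynomial 0xedb88320, seed ~0, no final inversion) of the six address bytes, and its top six bits select one of the 64 filter bits. A standalone model, with a made-up multicast address:

#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_le(const uint8_t *p, int len)
{
        uint32_t crc = ~0u;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320 : 0);
        }
        return crc;             /* no final ~crc, matching ether_crc_le() */
}

int main(void)
{
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint8_t mc_filter[8] = { 0 };
        unsigned int bit = crc32_le(mac, 6) >> 26;

        mc_filter[bit >> 3] |= 1 << (bit & 7);  /* select the bit within the byte */
        printf("hash bit %u -> filter byte %u, mask 0x%02x\n",
               bit, bit >> 3, 1 << (bit & 7));
        return 0;
}

When the perfect filter would overflow (the MC_FILTERBREAK test above), the driver falls back to accepting all multicasts, the usual trade-off for a 64-bit hash.
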
*/ - outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0); - for (i = 0; i < 8; i++) - outb(mc_filter[i], ioaddr + PORT_OFFSET(8 + i)); - memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter)); - outw(saved_bank, ioaddr + CONFIG_0); - } - spin_unlock_irqrestore (&lp->lock, flags); -} - -#ifdef MODULE -static struct net_device *dev_at1700; - -module_param(io, int, 0); -module_param(irq, int, 0); -module_param(net_debug, int, 0); -MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address"); -MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number"); -MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)"); - -static int __init at1700_module_init(void) -{ - if (io == 0) - printk("at1700: You should not use auto-probing with insmod!\n"); - dev_at1700 = at1700_probe(-1); - if (IS_ERR(dev_at1700)) - return PTR_ERR(dev_at1700); - return 0; -} - -static void __exit at1700_module_exit(void) -{ - unregister_netdev(dev_at1700); - cleanup_card(dev_at1700); - free_netdev(dev_at1700); -} -module_init(at1700_module_init); -module_exit(at1700_module_exit); -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/fujitsu/eth16i.c b/drivers/net/ethernet/fujitsu/eth16i.c deleted file mode 100644 index a992d1f7e0d2..000000000000 --- a/drivers/net/ethernet/fujitsu/eth16i.c +++ /dev/null @@ -1,1483 +0,0 @@ -/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux - - Written 1994-1999 by Mika Kuoppala - - Copyright (C) 1994-1999 by Mika Kuoppala - Based on skeleton.c and heavily on at1700.c by Donald Becker - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as miku@iki.fi - - This driver supports following cards : - - ICL EtherTeam 16i - - ICL EtherTeam 32 EISA - (Uses true 32 bit transfers rather than 16i compatibility mode) - - Example Module usage: - insmod eth16i.o io=0x2a0 mediatype=bnc - - mediatype can be one of the following: bnc,tp,dix,auto,eprom - - 'auto' will try to autoprobe mediatype. - 'eprom' will use whatever type defined in eprom. - - I have benchmarked driver with PII/300Mhz as a ftp client - and 486/33Mhz as a ftp server. Top speed was 1128.37 kilobytes/sec. - - Sources: - - skeleton.c a sample network driver core for linux, - written by Donald Becker <becker@scyld.com> - - at1700.c a driver for Allied Telesis AT1700, written - by Donald Becker. - - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i - written by Markku Viima - - The Fujitsu MB86965 databook. - - Author thanks following persons due to their valueble assistance: - Markku Viima (ICL) - Ari Valve (ICL) - Donald Becker - Kurt Huwig <kurt@huwig.de> - - Revision history: - - Version Date Description - - 0.01 15.12-94 Initial version (card detection) - 0.02 23.01-95 Interrupt is now hooked correctly - 0.03 01.02-95 Rewrote initialization part - 0.04 07.02-95 Base skeleton done... - Made a few changes to signature checking - to make it a bit reliable. - - fixed bug in tx_buf mapping - - fixed bug in initialization (DLC_EN - wasn't enabled when initialization - was done.) 
- 0.05 08.02-95 If there were more than one packet to send, - transmit was jammed due to invalid - register write...now fixed - 0.06 19.02-95 Rewrote interrupt handling - 0.07 13.04-95 Wrote EEPROM read routines - Card configuration now set according to - data read from EEPROM - 0.08 23.06-95 Wrote part that tries to probe used interface - port if AUTO is selected - - 0.09 01.09-95 Added module support - - 0.10 04.09-95 Fixed receive packet allocation to work - with kernels > 1.3.x - - 0.20 20.09-95 Added support for EtherTeam32 EISA - - 0.21 17.10-95 Removed the unnecessary extern - init_etherdev() declaration. Some - other cleanups. - - 0.22 22.02-96 Receive buffer was not flushed - correctly when faulty packet was - received. Now fixed. - - 0.23 26.02-96 Made resetting the adapter - more reliable. - - 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx - - 0.25 22.05-96 kfree() was missing from cleanup_module. - - 0.26 11.06-96 Sometimes card was not found by - check_signature(). Now made more reliable. - - 0.27 23.06-96 Oops. 16 consecutive collisions halted - adapter. Now will try to retransmit - MAX_COL_16 times before finally giving up. - - 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq - - 0.29 29.10-97 Multiple card support for module users - - 0.30 30.10-97 Fixed irq allocation bug. - (request_irq moved from probe to open) - - 0.30a 21.08-98 Card detection made more relaxed. Driver - had problems with some TCP/IP-PROM boots - to find the card. Suggested by - Kurt Huwig <kurt@huwig.de> - - 0.31 28.08-98 Media interface port can now be selected - with module parameters or kernel - boot parameters. - - 0.32 31.08-98 IRQ was never freed if open/close - pair wasn't called. Now fixed. - - 0.33 10.09-98 When eth16i_open() was called after - eth16i_close() chip never recovered. - Now more shallow reset is made on - close. - - 0.34 29.06-99 Fixed one bad #ifdef. - Changed ioaddr -> io for consistency - - 0.35 01.07-99 transmit,-receive bytes were never - updated in stats. - - Bugs: - In some cases the media interface autoprobing code doesn't find - the correct interface type. In this case you can - manually choose the interface type in DOS with E16IC.EXE which is - configuration software for EtherTeam16i and EtherTeam32 cards. - This is also true for IRQ setting. You cannot use module - parameter to configure IRQ of the card (yet). - - To do: - - Real multicast support - - Rewrite the media interface autoprobing code. Its _horrible_ ! - - Possibly merge all the MB86965 specific code to external - module for use by eth16.c and Donald's at1700.c - - IRQ configuration with module parameter. I will do - this when i will get enough info about setting - irq without configuration utility. 
-*/ - -static char *version = - "eth16i.c: v0.35 01-Jul-1999 Mika Kuoppala (miku@iki.fi)\n"; - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> -#include <linux/io.h> - -#include <asm/dma.h> - - - -/* Few macros */ -#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr))) -#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr))) - -/* This is the I/O address space for Etherteam 16i adapter. */ -#define ETH16I_IO_EXTENT 32 - -/* Ticks before deciding that transmit has timed out */ -#define TX_TIMEOUT (400*HZ/1000) - -/* Maximum loop count when receiving packets */ -#define MAX_RX_LOOP 20 - -/* Some interrupt masks */ -#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */ -#define ETH16I_INTR_OFF 0x0000 - -/* Buffers header status byte meanings */ -#define PKT_GOOD BIT(5) -#define PKT_GOOD_RMT BIT(4) -#define PKT_SHORT BIT(3) -#define PKT_ALIGN_ERR BIT(2) -#define PKT_CRC_ERR BIT(1) -#define PKT_RX_BUF_OVERFLOW BIT(0) - -/* Transmit status register (DLCR0) */ -#define TX_STATUS_REG 0 -#define TX_DONE BIT(7) -#define NET_BUSY BIT(6) -#define TX_PKT_RCD BIT(5) -#define CR_LOST BIT(4) -#define TX_JABBER_ERR BIT(3) -#define COLLISION BIT(2) -#define COLLISIONS_16 BIT(1) - -/* Receive status register (DLCR1) */ -#define RX_STATUS_REG 1 -#define RX_PKT BIT(7) /* Packet received */ -#define BUS_RD_ERR BIT(6) -#define SHORT_PKT_ERR BIT(3) -#define ALIGN_ERR BIT(2) -#define CRC_ERR BIT(1) -#define RX_BUF_OVERFLOW BIT(0) - -/* Transmit Interrupt Enable Register (DLCR2) */ -#define TX_INTR_REG 2 -#define TX_INTR_DONE BIT(7) -#define TX_INTR_COL BIT(2) -#define TX_INTR_16_COL BIT(1) - -/* Receive Interrupt Enable Register (DLCR3) */ -#define RX_INTR_REG 3 -#define RX_INTR_RECEIVE BIT(7) -#define RX_INTR_SHORT_PKT BIT(3) -#define RX_INTR_CRC_ERR BIT(1) -#define RX_INTR_BUF_OVERFLOW BIT(0) - -/* Transmit Mode Register (DLCR4) */ -#define TRANSMIT_MODE_REG 4 -#define LOOPBACK_CONTROL BIT(1) -#define CONTROL_OUTPUT BIT(2) - -/* Receive Mode Register (DLCR5) */ -#define RECEIVE_MODE_REG 5 -#define RX_BUFFER_EMPTY BIT(6) -#define ACCEPT_BAD_PACKETS BIT(5) -#define RECEIVE_SHORT_ADDR BIT(4) -#define ACCEPT_SHORT_PACKETS BIT(3) -#define REMOTE_RESET BIT(2) - -#define ADDRESS_FILTER_MODE BIT(1) | BIT(0) -#define REJECT_ALL 0 -#define ACCEPT_ALL 3 -#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */ -#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */ - -/* Configuration Register 0 (DLCR6) */ -#define CONFIG_REG_0 6 -#define DLC_EN BIT(7) -#define SRAM_CYCLE_TIME_100NS BIT(6) -#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */ -#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */ -#define TBS1 BIT(3) -#define TBS0 BIT(2) -#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */ -#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */ - -#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */ -#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */ -#endif -#define TX_BUF_1x2048 0 -#define TX_BUF_2x2048 1 -#define TX_BUF_2x4098 2 -#define TX_BUF_2x8192 3 - -/* Configuration Register 1 (DLCR7) */ -#define CONFIG_REG_1 7 -#define POWERUP BIT(5) - -/* Transmit start register */ -#define TRANSMIT_START_REG 10 -#define 
TRANSMIT_START_RB 2 -#define TX_START BIT(7) /* Rest of register bit indicate*/ - /* number of packets in tx buffer*/ -/* Node ID registers (DLCR8-13) */ -#define NODE_ID_0 8 -#define NODE_ID_RB 0 - -/* Hash Table registers (HT8-15) */ -#define HASH_TABLE_0 8 -#define HASH_TABLE_RB 1 - -/* Buffer memory ports */ -#define BUFFER_MEM_PORT_LB 8 -#define DATAPORT BUFFER_MEM_PORT_LB -#define BUFFER_MEM_PORT_HB 9 - -/* 16 Collision control register (BMPR11) */ -#define COL_16_REG 11 -#define HALT_ON_16 0x00 -#define RETRANS_AND_HALT_ON_16 0x02 - -/* Maximum number of attempts to send after 16 concecutive collisions */ -#define MAX_COL_16 10 - -/* DMA Burst and Transceiver Mode Register (BMPR13) */ -#define TRANSCEIVER_MODE_REG 13 -#define TRANSCEIVER_MODE_RB 2 -#define IO_BASE_UNLOCK BIT(7) -#define LOWER_SQUELCH_TRESH BIT(6) -#define LINK_TEST_DISABLE BIT(5) -#define AUI_SELECT BIT(4) -#define DIS_AUTO_PORT_SEL BIT(3) - -/* Filter Self Receive Register (BMPR14) */ -#define FILTER_SELF_RX_REG 14 -#define SKIP_RX_PACKET BIT(2) -#define FILTER_SELF_RECEIVE BIT(0) - -/* EEPROM Control Register (BMPR 16) */ -#define EEPROM_CTRL_REG 16 - -/* EEPROM Data Register (BMPR 17) */ -#define EEPROM_DATA_REG 17 - -/* NMC93CSx6 EEPROM Control Bits */ -#define CS_0 0x00 -#define CS_1 0x20 -#define SK_0 0x00 -#define SK_1 0x40 -#define DI_0 0x00 -#define DI_1 0x80 - -/* NMC93CSx6 EEPROM Instructions */ -#define EEPROM_READ 0x80 - -/* NMC93CSx6 EEPROM Addresses */ -#define E_NODEID_0 0x02 -#define E_NODEID_1 0x03 -#define E_NODEID_2 0x04 -#define E_PORT_SELECT 0x14 - #define E_PORT_BNC 0x00 - #define E_PORT_DIX 0x01 - #define E_PORT_TP 0x02 - #define E_PORT_AUTO 0x03 - #define E_PORT_FROM_EPROM 0x04 -#define E_PRODUCT_CFG 0x30 - - -/* Macro to slow down io between EEPROM clock transitions */ -#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0) - -/* Jumperless Configuration Register (BMPR19) */ -#define JUMPERLESS_CONFIG 19 - -/* ID ROM registers, writing to them also resets some parts of chip */ -#define ID_ROM_0 24 -#define ID_ROM_7 31 -#define RESET ID_ROM_0 - -/* This is the I/O address list to be probed when seeking the card */ -static unsigned int eth16i_portlist[] __initdata = { - 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 -}; - -static unsigned int eth32i_portlist[] __initdata = { - 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000, - 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0 -}; - -/* This is the Interrupt lookup table for Eth16i card */ -static unsigned int eth16i_irqmap[] __initdata = { 9, 10, 5, 15, 0 }; -#define NUM_OF_ISA_IRQS 4 - -/* This is the Interrupt lookup table for Eth32i card */ -static unsigned int eth32i_irqmap[] __initdata = { 3, 5, 7, 9, 10, 11, 12, 15, 0 }; -#define EISA_IRQ_REG 0xc89 -#define NUM_OF_EISA_IRQS 8 - -static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 }; - -/* Use 0 for production, 1 for verification, >2 for debug */ -#ifndef ETH16I_DEBUG -#define ETH16I_DEBUG 0 -#endif -static unsigned int eth16i_debug = ETH16I_DEBUG; - -/* Information for each board */ - -struct eth16i_local { - unsigned char tx_started; - unsigned char tx_buf_busy; - unsigned short tx_queue; /* Number of packets in transmit buffer */ - unsigned short tx_queue_len; - unsigned int tx_buf_size; - unsigned long open_time; - unsigned long tx_buffered_packets; - unsigned long tx_buffered_bytes; - unsigned long col_16; - spinlock_t lock; -}; - -/* Function prototypes */ - -static int eth16i_probe1(struct net_device 
*dev, int ioaddr); -static int eth16i_check_signature(int ioaddr); -static int eth16i_probe_port(int ioaddr); -static void eth16i_set_port(int ioaddr, int porttype); -static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l); -static int eth16i_receive_probe_packet(int ioaddr); -static int eth16i_get_irq(int ioaddr); -static int eth16i_read_eeprom(int ioaddr, int offset); -static int eth16i_read_eeprom_word(int ioaddr); -static void eth16i_eeprom_cmd(int ioaddr, unsigned char command); -static int eth16i_open(struct net_device *dev); -static int eth16i_close(struct net_device *dev); -static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev); -static void eth16i_rx(struct net_device *dev); -static void eth16i_timeout(struct net_device *dev); -static irqreturn_t eth16i_interrupt(int irq, void *dev_id); -static void eth16i_reset(struct net_device *dev); -static void eth16i_timeout(struct net_device *dev); -static void eth16i_skip_packet(struct net_device *dev); -static void eth16i_multicast(struct net_device *dev); -static void eth16i_select_regbank(unsigned char regbank, int ioaddr); -static void eth16i_initialize(struct net_device *dev, int boot); - -#if 0 -static int eth16i_set_irq(struct net_device *dev); -#endif - -#ifdef MODULE -static ushort eth16i_parse_mediatype(const char* s); -#endif - -static char cardname[] __initdata = "ICL EtherTeam 16i/32"; - -static int __init do_eth16i_probe(struct net_device *dev) -{ - int i; - int ioaddr; - int base_addr = dev->base_addr; - - if(eth16i_debug > 4) - printk(KERN_DEBUG "Probing started for %s\n", cardname); - - if(base_addr > 0x1ff) /* Check only single location */ - return eth16i_probe1(dev, base_addr); - else if(base_addr != 0) /* Don't probe at all */ - return -ENXIO; - - /* Seek card from the ISA io address space */ - for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++) - if(eth16i_probe1(dev, ioaddr) == 0) - return 0; - - /* Seek card from the EISA io address space */ - for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++) - if(eth16i_probe1(dev, ioaddr) == 0) - return 0; - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init eth16i_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct eth16i_local)); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_eth16i_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops eth16i_netdev_ops = { - .ndo_open = eth16i_open, - .ndo_stop = eth16i_close, - .ndo_start_xmit = eth16i_tx, - .ndo_set_rx_mode = eth16i_multicast, - .ndo_tx_timeout = eth16i_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init eth16i_probe1(struct net_device *dev, int ioaddr) -{ - struct eth16i_local *lp = netdev_priv(dev); - static unsigned version_printed; - int retval; - - /* Let's grab the region */ - if (!request_region(ioaddr, ETH16I_IO_EXTENT, cardname)) - return -EBUSY; - - /* - The MB86985 chip has on register which holds information in which - io address the chip lies. First read this register and compare - it to our current io address and if match then this could - be our chip. 
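
The test this comment describes, condensed into a hardware-free check: the low three bits of the jumperless configuration register index the same ISA port table the probe walks, so a genuine chip points back at its own base address. The helper name and the sample register value are illustrative only:

#include <stdio.h>

static const unsigned int portlist[] = {
        0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
};

static int config_points_back(unsigned int ioaddr, unsigned char jumperless_cfg)
{
        return portlist[jumperless_cfg & 0x07] == ioaddr;
}

int main(void)
{
        /* e.g. the register reads back 0x02, which indexes entry 0x2A0 */
        printf("match: %d\n", config_points_back(0x2A0, 0x02));
        return 0;
}
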
- */ - - if(ioaddr < 0x1000) { - if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)] - != ioaddr) { - retval = -ENODEV; - goto out; - } - } - - /* Now we will go a bit deeper and try to find the chip's signature */ - - if(eth16i_check_signature(ioaddr) != 0) { - retval = -ENODEV; - goto out; - } - - /* - Now it seems that we have found a ethernet chip in this particular - ioaddr. The MB86985 chip has this feature, that when you read a - certain register it will increase it's io base address to next - configurable slot. Now when we have found the chip, first thing is - to make sure that the chip's ioaddr will hold still here. - */ - - eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr); - outb(0x00, ioaddr + TRANSCEIVER_MODE_REG); - - outb(0x00, ioaddr + RESET); /* Reset some parts of chip */ - BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */ - - if( (eth16i_debug & version_printed++) == 0) - printk(KERN_INFO "%s", version); - - dev->base_addr = ioaddr; - dev->irq = eth16i_get_irq(ioaddr); - - /* Try to obtain interrupt vector */ - - if ((retval = request_irq(dev->irq, (void *)ð16i_interrupt, 0, cardname, dev))) { - printk(KERN_WARNING "%s at %#3x, but is unusable due to conflicting IRQ %d.\n", - cardname, ioaddr, dev->irq); - goto out; - } - - printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ", - dev->name, cardname, ioaddr, dev->irq); - - - /* Now we will have to lock the chip's io address */ - eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr); - outb(0x38, ioaddr + TRANSCEIVER_MODE_REG); - - eth16i_initialize(dev, 1); /* Initialize rest of the chip's registers */ - - /* Now let's same some energy by shutting down the chip ;) */ - BITCLR(ioaddr + CONFIG_REG_1, POWERUP); - - /* Initialize the device structure */ - dev->netdev_ops = ð16i_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - spin_lock_init(&lp->lock); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr, ETH16I_IO_EXTENT); - return retval; -} - - -static void eth16i_initialize(struct net_device *dev, int boot) -{ - int ioaddr = dev->base_addr; - int i, node_w = 0; - unsigned char node_byte = 0; - - /* Setup station address */ - eth16i_select_regbank(NODE_ID_RB, ioaddr); - for(i = 0 ; i < 3 ; i++) { - unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i); - ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val); - } - - for(i = 0; i < 6; i++) { - outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i); - if(boot) { - printk("%02x", inb(ioaddr + NODE_ID_0 + i)); - if(i != 5) - printk(":"); - } - } - - /* Now we will set multicast addresses to accept none */ - eth16i_select_regbank(HASH_TABLE_RB, ioaddr); - for(i = 0; i < 8; i++) - outb(0x00, ioaddr + HASH_TABLE_0 + i); - - /* - Now let's disable the transmitter and receiver, set the buffer ram - cycle time, bus width and buffer data path width. Also we shall - set transmit buffer size and total buffer size. 
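
As a worked example of the configuration byte assembled below from the DLCR6 bit definitions earlier in the file, one plausible combination (100 ns SRAM cycle, 16-bit bus and buffer paths, two 8 KB transmit buffers, 32 KB of SRAM) comes out as 0xce:

#include <stdio.h>

#define DLC_EN                  (1 << 7)
#define SRAM_CYCLE_TIME_100NS   (1 << 6)
#define TX_BUF_2x8192           3       /* value written into the TBS1/TBS0 field */
#define SRAM_BS1                (1 << 1)

int main(void)
{
        unsigned char cfg = DLC_EN | SRAM_CYCLE_TIME_100NS |
                            (TX_BUF_2x8192 << 2) | SRAM_BS1;

        printf("DLCR6 = 0x%02x\n", cfg);        /* 0xce */
        return 0;
}

The code below builds the same byte at probe time, mixing fixed bits with values read from the EEPROM product-configuration word.
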
- */ - - eth16i_select_regbank(2, ioaddr); - - node_byte = 0; - node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG); - - if( (node_w & 0xFF00) == 0x0800) - node_byte |= BUFFER_WIDTH_8; - - node_byte |= SRAM_BS1; - - if( (node_w & 0x00FF) == 64) - node_byte |= SRAM_BS0; - - node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2); - - outb(node_byte, ioaddr + CONFIG_REG_0); - - /* We shall halt the transmitting, if 16 collisions are detected */ - outb(HALT_ON_16, ioaddr + COL_16_REG); - -#ifdef MODULE - /* if_port already set by init_module() */ -#else - dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ? - dev->mem_start : E_PORT_FROM_EPROM; -#endif - - /* Set interface port type */ - if(boot) { - static const char * const porttype[] = { - "BNC", "DIX", "TP", "AUTO", "FROM_EPROM" - }; - - switch(dev->if_port) - { - - case E_PORT_FROM_EPROM: - dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT); - break; - - case E_PORT_AUTO: - dev->if_port = eth16i_probe_port(ioaddr); - break; - - case E_PORT_BNC: - case E_PORT_TP: - case E_PORT_DIX: - break; - } - - printk(" %s interface.\n", porttype[dev->if_port]); - - eth16i_set_port(ioaddr, dev->if_port); - } - - /* Set Receive Mode to normal operation */ - outb(MODE_2, ioaddr + RECEIVE_MODE_REG); -} - -static int eth16i_probe_port(int ioaddr) -{ - int i; - int retcode; - unsigned char dummy_packet[64]; - - /* Powerup the chip */ - outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1); - - BITSET(ioaddr + CONFIG_REG_0, DLC_EN); - - eth16i_select_regbank(NODE_ID_RB, ioaddr); - - for(i = 0; i < 6; i++) { - dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i); - dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i); - } - - dummy_packet[12] = 0x00; - dummy_packet[13] = 0x04; - memset(dummy_packet + 14, 0, sizeof(dummy_packet) - 14); - - eth16i_select_regbank(2, ioaddr); - - for(i = 0; i < 3; i++) { - BITSET(ioaddr + CONFIG_REG_0, DLC_EN); - BITCLR(ioaddr + CONFIG_REG_0, DLC_EN); - eth16i_set_port(ioaddr, i); - - if(eth16i_debug > 1) - printk(KERN_DEBUG "Set port number %d\n", i); - - retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64); - if(retcode == 0) { - retcode = eth16i_receive_probe_packet(ioaddr); - if(retcode != -1) { - if(eth16i_debug > 1) - printk(KERN_DEBUG "Eth16i interface port found at %d\n", i); - return i; - } - } - else { - if(eth16i_debug > 1) - printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n"); - } - } - - if( eth16i_debug > 1) - printk(KERN_DEBUG "Using default port\n"); - - return E_PORT_BNC; -} - -static void eth16i_set_port(int ioaddr, int porttype) -{ - unsigned short temp = 0; - - eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr); - outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG); - - temp |= DIS_AUTO_PORT_SEL; - - switch(porttype) { - - case E_PORT_BNC : - temp |= AUI_SELECT; - break; - - case E_PORT_TP : - break; - - case E_PORT_DIX : - temp |= AUI_SELECT; - BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT); - break; - } - - outb(temp, ioaddr + TRANSCEIVER_MODE_REG); - - if(eth16i_debug > 1) { - printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG)); - printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n", - inb(ioaddr+TRANSCEIVER_MODE_REG)); - } -} - -static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) -{ - unsigned long starttime; - - outb(0xff, ioaddr + TX_STATUS_REG); - - outw(l, ioaddr + DATAPORT); - outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1); - - starttime = jiffies; - outb(TX_START | 1, ioaddr + TRANSMIT_START_REG); - - while( 
(inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) { - if( time_after(jiffies, starttime + TX_TIMEOUT)) { - return -1; - } - } - - return 0; -} - -static int eth16i_receive_probe_packet(int ioaddr) -{ - unsigned long starttime; - - starttime = jiffies; - - while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) { - if( time_after(jiffies, starttime + TX_TIMEOUT)) { - - if(eth16i_debug > 1) - printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n"); - starttime = jiffies; - while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) { - if( time_after(jiffies, starttime + TX_TIMEOUT)) { - if(eth16i_debug > 1) - printk(KERN_DEBUG "Timeout occurred waiting receive packet\n"); - return -1; - } - } - - if(eth16i_debug > 1) - printk(KERN_DEBUG "RECEIVE_PACKET\n"); - return 0; /* Found receive packet */ - } - } - - if(eth16i_debug > 1) { - printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG)); - printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG)); - } - - return 0; /* Return success */ -} - -#if 0 -static int eth16i_set_irq(struct net_device* dev) -{ - const int ioaddr = dev->base_addr; - const int irq = dev->irq; - int i = 0; - - if(ioaddr < 0x1000) { - while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq) - i++; - - if(i < NUM_OF_ISA_IRQS) { - u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG); - cbyte = (cbyte & 0x3F) | (i << 6); - outb(cbyte, ioaddr + JUMPERLESS_CONFIG); - return 0; - } - } - else { - printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name); - } - - return -1; - -} -#endif - -static int __init eth16i_get_irq(int ioaddr) -{ - unsigned char cbyte; - - if( ioaddr < 0x1000) { - cbyte = inb(ioaddr + JUMPERLESS_CONFIG); - return eth16i_irqmap[((cbyte & 0xC0) >> 6)]; - } else { /* Oh..the card is EISA so method getting IRQ different */ - unsigned short index = 0; - cbyte = inb(ioaddr + EISA_IRQ_REG); - while( (cbyte & 0x01) == 0) { - cbyte = cbyte >> 1; - index++; - } - return eth32i_irqmap[index]; - } -} - -static int __init eth16i_check_signature(int ioaddr) -{ - int i; - unsigned char creg[4] = { 0 }; - - for(i = 0; i < 4 ; i++) { - - creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i); - - if(eth16i_debug > 1) - printk("eth16i: read signature byte %x at %x\n", - creg[i], - ioaddr + TRANSMIT_MODE_REG + i); - } - - creg[0] &= 0x0F; /* Mask collision cnr */ - creg[2] &= 0x7F; /* Mask DCLEN bit */ - -#if 0 - /* - This was removed because the card was sometimes left to state - from which it couldn't be find anymore. If there is need - to more strict check still this have to be fixed. - */ - if( ! 
((creg[0] == 0x06) && (creg[1] == 0x41)) ) { - if(creg[1] != 0x42) - return -1; - } -#endif - - if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) { - creg[2] &= 0x40; - creg[3] &= 0x03; - - if( !((creg[2] == 0x40) && (creg[3] == 0x00)) ) - return -1; - } - - if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0) - return -1; - - if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00) - return -1; - - return 0; -} - -static int eth16i_read_eeprom(int ioaddr, int offset) -{ - int data = 0; - - eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset); - outb(CS_1, ioaddr + EEPROM_CTRL_REG); - data = eth16i_read_eeprom_word(ioaddr); - outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG); - - return data; -} - -static int eth16i_read_eeprom_word(int ioaddr) -{ - int i; - int data = 0; - - for(i = 16; i > 0; i--) { - outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG); - eeprom_slow_io(); - outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG); - eeprom_slow_io(); - data = (data << 1) | - ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0); - - eeprom_slow_io(); - } - - return data; -} - -static void eth16i_eeprom_cmd(int ioaddr, unsigned char command) -{ - int i; - - outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG); - outb(DI_0, ioaddr + EEPROM_DATA_REG); - outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG); - outb(DI_1, ioaddr + EEPROM_DATA_REG); - outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG); - - for(i = 7; i >= 0; i--) { - short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 ); - outb(cmd, ioaddr + EEPROM_DATA_REG); - outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG); - eeprom_slow_io(); - outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG); - eeprom_slow_io(); - } -} - -static int eth16i_open(struct net_device *dev) -{ - struct eth16i_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* Powerup the chip */ - outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1); - - /* Initialize the chip */ - eth16i_initialize(dev, 0); - - /* Set the transmit buffer size */ - lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03]; - - if(eth16i_debug > 0) - printk(KERN_DEBUG "%s: transmit buffer size %d\n", - dev->name, lp->tx_buf_size); - - /* Now enable Transmitter and Receiver sections */ - BITCLR(ioaddr + CONFIG_REG_0, DLC_EN); - - /* Now switch to register bank 2, for run time operation */ - eth16i_select_regbank(2, ioaddr); - - lp->open_time = jiffies; - lp->tx_started = 0; - lp->tx_queue = 0; - lp->tx_queue_len = 0; - - /* Turn on interrupts*/ - outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); - - netif_start_queue(dev); - return 0; -} - -static int eth16i_close(struct net_device *dev) -{ - struct eth16i_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - eth16i_reset(dev); - - /* Turn off interrupts*/ - outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); - - netif_stop_queue(dev); - - lp->open_time = 0; - - /* Disable transmit and receive */ - BITSET(ioaddr + CONFIG_REG_0, DLC_EN); - - /* Reset the chip */ - /* outb(0xff, ioaddr + RESET); */ - /* outw(0xffff, ioaddr + TX_STATUS_REG); */ - - outb(0x00, ioaddr + CONFIG_REG_1); - - return 0; -} - -static void eth16i_timeout(struct net_device *dev) -{ - struct eth16i_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - /* - If we get here, some higher level has decided that - we are broken. There should really be a "kick me" - function call instead. - */ - - outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); - printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n", - dev->name, - inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ? 
- "IRQ conflict" : "network cable problem"); - - dev->trans_start = jiffies; /* prevent tx timeout */ - - /* Let's dump all registers */ - if(eth16i_debug > 0) { - printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n", - dev->name, inb(ioaddr + 0), - inb(ioaddr + 1), inb(ioaddr + 2), - inb(ioaddr + 3), inb(ioaddr + 4), - inb(ioaddr + 5), - inb(ioaddr + 6), inb(ioaddr + 7)); - - printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n", - dev->name, inb(ioaddr + TRANSMIT_START_REG), - inb(ioaddr + COL_16_REG)); - printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue); - printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len); - printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started); - } - dev->stats.tx_errors++; - eth16i_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); - netif_wake_queue(dev); -} - -static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct eth16i_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int status = 0; - ushort length = skb->len; - unsigned char *buf; - unsigned long flags; - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - buf = skb->data; - - netif_stop_queue(dev); - - /* Turn off TX interrupts */ - outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); - - /* We would be better doing the disable_irq tricks the 3c509 does, - that would make this suck a lot less */ - - spin_lock_irqsave(&lp->lock, flags); - - if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) { - if(eth16i_debug > 0) - printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name); - } - else { - outw(length, ioaddr + DATAPORT); - - if( ioaddr < 0x1000 ) - outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1); - else { - unsigned char frag = length % 4; - outsl(ioaddr + DATAPORT, buf, length >> 2); - if( frag != 0 ) { - outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1); - if( frag == 3 ) - outsw(ioaddr + DATAPORT, - (buf + (length & 0xFFFC) + 2), 1); - } - } - lp->tx_buffered_packets++; - lp->tx_buffered_bytes = length; - lp->tx_queue++; - lp->tx_queue_len += length + 2; - } - lp->tx_buf_busy = 0; - - if(lp->tx_started == 0) { - /* If the transmitter is idle..always trigger a transmit */ - outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG); - lp->tx_queue = 0; - lp->tx_queue_len = 0; - lp->tx_started = 1; - netif_wake_queue(dev); - } - else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) { - /* There is still more room for one more packet in tx buffer */ - netif_wake_queue(dev); - } - - spin_unlock_irqrestore(&lp->lock, flags); - - outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); - /* Turn TX interrupts back on */ - /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */ - status = 0; - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -static void eth16i_rx(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int boguscount = MAX_RX_LOOP; - - /* Loop until all packets have been read */ - while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) { - - /* Read status byte from receive buffer */ - ushort status = inw(ioaddr + DATAPORT); - - /* Get the size of the packet from receive buffer */ - ushort pkt_len = inw(ioaddr + DATAPORT); - - if(eth16i_debug > 4) - printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n", - dev->name, - inb(ioaddr + RECEIVE_MODE_REG), status); - - if( !(status & PKT_GOOD) ) { - dev->stats.rx_errors++; - - if( (pkt_len < 
ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) { - dev->stats.rx_length_errors++; - eth16i_reset(dev); - return; - } - else { - eth16i_skip_packet(dev); - dev->stats.rx_dropped++; - } - } - else { /* Ok so now we should have a good packet */ - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, pkt_len + 3); - if( skb == NULL ) { - printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n", - dev->name, pkt_len); - eth16i_skip_packet(dev); - dev->stats.rx_dropped++; - break; - } - - skb_reserve(skb,2); - - /* - Now let's get the packet out of buffer. - size is (pkt_len + 1) >> 1, cause we are now reading words - and it have to be even aligned. - */ - - if(ioaddr < 0x1000) - insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), - (pkt_len + 1) >> 1); - else { - unsigned char *buf = skb_put(skb, pkt_len); - unsigned char frag = pkt_len % 4; - - insl(ioaddr + DATAPORT, buf, pkt_len >> 2); - - if(frag != 0) { - unsigned short rest[2]; - rest[0] = inw( ioaddr + DATAPORT ); - if(frag == 3) - rest[1] = inw( ioaddr + DATAPORT ); - - memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag); - } - } - - skb->protocol=eth_type_trans(skb, dev); - - if( eth16i_debug > 5 ) { - int i; - printk(KERN_DEBUG "%s: Received packet of length %d.\n", - dev->name, pkt_len); - for(i = 0; i < 14; i++) - printk(KERN_DEBUG " %02x", skb->data[i]); - printk(KERN_DEBUG ".\n"); - } - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - - } /* else */ - - if(--boguscount <= 0) - break; - - } /* while */ -} - -static irqreturn_t eth16i_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct eth16i_local *lp; - int ioaddr = 0, status; - int handled = 0; - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - /* Turn off all interrupts from adapter */ - outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG); - - /* eth16i_tx won't be called */ - spin_lock(&lp->lock); - - status = inw(ioaddr + TX_STATUS_REG); /* Get the status */ - outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */ - - if (status) - handled = 1; - - if(eth16i_debug > 3) - printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status); - - if( status & 0x7f00 ) { - - dev->stats.rx_errors++; - - if(status & (BUS_RD_ERR << 8) ) - printk(KERN_WARNING "%s: Bus read error.\n",dev->name); - if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++; - if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++; - if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++; - if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++; - } - if( status & 0x001a) { - - dev->stats.tx_errors++; - - if(status & CR_LOST) dev->stats.tx_carrier_errors++; - if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++; - -#if 0 - if(status & COLLISION) { - dev->stats.collisions += - ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4); - } -#endif - if(status & COLLISIONS_16) { - if(lp->col_16 < MAX_COL_16) { - lp->col_16++; - dev->stats.collisions++; - /* Resume transmitting, skip failed packet */ - outb(0x02, ioaddr + COL_16_REG); - } - else { - printk(KERN_WARNING "%s: bailing out due to many consecutive 16-in-a-row collisions. Network cable problem?\n", dev->name); - } - } - } - - if( status & 0x00ff ) { /* Let's check the transmit status reg */ - - if(status & TX_DONE) { /* The transmit has been done */ - dev->stats.tx_packets = lp->tx_buffered_packets; - dev->stats.tx_bytes += lp->tx_buffered_bytes; - lp->col_16 = 0; - - if(lp->tx_queue) { /* Is there still packets ? 
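
The restart just below is the other half of a simple queue-then-kick scheme: the transmit path buffers frames while the adapter is busy, and the Tx-done interrupt restarts the hardware with a single start command carrying the count of buffered frames. A hardware-free sketch (start_hw() is a hypothetical stand-in for the TX_START register write):

#include <stdio.h>

static int tx_started, tx_queue;

static void start_hw(int npackets)
{
        printf("TX_START | %d\n", npackets);    /* outb(TX_START | n, ...) */
}

static void queue_frame(void)
{
        tx_queue++;
        if (!tx_started) {              /* adapter idle: kick it right away */
                start_hw(tx_queue);
                tx_queue = 0;
                tx_started = 1;
        }
}

static void tx_done_irq(void)
{
        if (tx_queue) {                 /* more frames were buffered meanwhile */
                start_hw(tx_queue);
                tx_queue = 0;
        } else {
                tx_started = 0;
        }
}

int main(void)
{
        queue_frame();                  /* starts immediately */
        queue_frame();                  /* buffered */
        queue_frame();                  /* buffered */
        tx_done_irq();                  /* restarts with 2 buffered frames */
        tx_done_irq();                  /* nothing left: go idle */
        return 0;
}
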
*/ - /* There was packet(s) so start transmitting and write also - how many packets there is to be sended */ - outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG); - lp->tx_queue = 0; - lp->tx_queue_len = 0; - lp->tx_started = 1; - } - else { - lp->tx_started = 0; - } - netif_wake_queue(dev); - } - } - - if( ( status & 0x8000 ) || - ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) { - eth16i_rx(dev); /* We have packet in receive buffer */ - } - - /* Turn interrupts back on */ - outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); - - if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) { - /* There is still more room for one more packet in tx buffer */ - netif_wake_queue(dev); - } - - spin_unlock(&lp->lock); - - return IRQ_RETVAL(handled); -} - -static void eth16i_skip_packet(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - inw(ioaddr + DATAPORT); - inw(ioaddr + DATAPORT); - inw(ioaddr + DATAPORT); - - outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG); - while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0); -} - -static void eth16i_reset(struct net_device *dev) -{ - struct eth16i_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if(eth16i_debug > 1) - printk(KERN_DEBUG "%s: Resetting device.\n", dev->name); - - BITSET(ioaddr + CONFIG_REG_0, DLC_EN); - outw(0xffff, ioaddr + TX_STATUS_REG); - eth16i_select_regbank(2, ioaddr); - - lp->tx_started = 0; - lp->tx_buf_busy = 0; - lp->tx_queue = 0; - lp->tx_queue_len = 0; - BITCLR(ioaddr + CONFIG_REG_0, DLC_EN); -} - -static void eth16i_multicast(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) - { - outb(3, ioaddr + RECEIVE_MODE_REG); - } else { - outb(2, ioaddr + RECEIVE_MODE_REG); - } -} - -static void eth16i_select_regbank(unsigned char banknbr, int ioaddr) -{ - unsigned char data; - - data = inb(ioaddr + CONFIG_REG_1); - outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1); -} - -#ifdef MODULE - -static ushort eth16i_parse_mediatype(const char* s) -{ - if(!s) - return E_PORT_FROM_EPROM; - - if (!strncmp(s, "bnc", 3)) - return E_PORT_BNC; - else if (!strncmp(s, "tp", 2)) - return E_PORT_TP; - else if (!strncmp(s, "dix", 3)) - return E_PORT_DIX; - else if (!strncmp(s, "auto", 4)) - return E_PORT_AUTO; - else - return E_PORT_FROM_EPROM; -} - -#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */ - -static struct net_device *dev_eth16i[MAX_ETH16I_CARDS]; -static int io[MAX_ETH16I_CARDS]; -#if 0 -static int irq[MAX_ETH16I_CARDS]; -#endif -static char* mediatype[MAX_ETH16I_CARDS]; -static int debug = -1; - -MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>"); -MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver"); -MODULE_LICENSE("GPL"); - - -module_param_array(io, int, NULL, 0); -MODULE_PARM_DESC(io, "eth16i I/O base address(es)"); - -#if 0 -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(irq, "eth16i interrupt request number"); -#endif - -module_param_array(mediatype, charp, NULL, 0); -MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)"); - -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); - -int __init init_module(void) -{ - int this_dev, found = 0; - struct net_device *dev; - - for (this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) { - dev = alloc_etherdev(sizeof(struct eth16i_local)); - if (!dev) - break; - - dev->base_addr = io[this_dev]; - - if(debug != -1) - eth16i_debug = debug; - - if(eth16i_debug > 1) - 
printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" ); - - dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]); - - if(io[this_dev] == 0) { - if (this_dev != 0) { /* Only autoprobe 1st one */ - free_netdev(dev); - break; - } - - printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n"); - } - - if (do_eth16i_probe(dev) == 0) { - dev_eth16i[found++] = dev; - continue; - } - printk(KERN_WARNING "eth16i.c No Eth16i card found (i/o = 0x%x).\n", - io[this_dev]); - free_netdev(dev); - break; - } - if (found) - return 0; - return -ENXIO; -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) { - struct net_device *dev = dev_eth16i[this_dev]; - - if (netdev_priv(dev)) { - unregister_netdev(dev); - free_irq(dev->irq, dev); - release_region(dev->base_addr, ETH16I_IO_EXTENT); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c deleted file mode 100644 index 6a5c21b82c51..000000000000 --- a/drivers/net/ethernet/i825xx/3c505.c +++ /dev/null @@ -1,1671 +0,0 @@ -/* - * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505) - * By Craig Southeren, Juha Laiho and Philip Blundell - * - * 3c505.c This module implements an interface to the 3Com - * Etherlink Plus (3c505) Ethernet card. Linux device - * driver interface reverse engineered from the Linux 3C509 - * device drivers. Some 3C505 information gleaned from - * the Crynwr packet driver. Still this driver would not - * be here without 3C505 technical reference provided by - * 3Com. - * - * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $ - * - * Authors: Linux 3c505 device driver by - * Craig Southeren, <craigs@ineluki.apana.org.au> - * Final debugging by - * Andrew Tridgell, <tridge@nimbus.anu.edu.au> - * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by - * Juha Laiho, <jlaiho@ichaos.nullnet.fi> - * Linux 3C509 driver by - * Donald Becker, <becker@super.org> - * (Now at <becker@scyld.com>) - * Crynwr packet driver by - * Krishnan Gopalan and Gregg Stefancik, - * Clemson University Engineering Computer Operations. - * Portions of the code have been adapted from the 3c505 - * driver for NCSA Telnet by Bruce Orchard and later - * modified by Warren Van Houten and krus@diku.dk. - * 3C505 technical information provided by - * Terry Murphy, of 3Com Network Adapter Division - * Linux 1.3.0 changes by - * Alan Cox <Alan.Cox@linux.org> - * More debugging, DMA support, currently maintained by - * Philip Blundell <philb@gnu.org> - * Multicard/soft configurable dma channel/rev 2 hardware support - * by Christopher Collins <ccollins@pcug.org.au> - * Ethtool support (jgarzik), 11/17/2001 - */ - -#define DRV_NAME "3c505" -#define DRV_VERSION "1.10a" - - -/* Theory of operation: - * - * The 3c505 is quite an intelligent board. All communication with it is done - * by means of Primary Command Blocks (PCBs); these are transferred using PIO - * through the command register. The card has 256k of on-board RAM, which is - * used to buffer received packets. It might seem at first that more buffers - * are better, but in fact this isn't true. From my tests, it seems that - * more than about 10 buffers are unnecessary, and there is a noticeable - * performance hit in having more active on the card. So the majority of the - * card's memory isn't, in fact, used. 
Sadly, the card only has one transmit - * buffer and, short of loading our own firmware into it (which is what some - * drivers resort to) there's nothing we can do about this. - * - * We keep up to 4 "receive packet" commands active on the board at a time. - * When a packet comes in, so long as there is a receive command active, the - * board will send us a "packet received" PCB and then add the data for that - * packet to the DMA queue. If a DMA transfer is not already in progress, we - * set one up to start uploading the data. We have to maintain a list of - * backlogged receive packets, because the card may decide to tell us about - * a newly-arrived packet at any time, and we may not be able to start a DMA - * transfer immediately (ie one may already be going on). We can't NAK the - * PCB, because then it would throw the packet away. - * - * Trying to send a PCB to the card at the wrong moment seems to have bad - * effects. If we send it a transmit PCB while a receive DMA is happening, - * it will just NAK the PCB and so we will have wasted our time. Worse, it - * sometimes seems to interrupt the transfer. The majority of the low-level - * code is protected by one huge semaphore -- "busy" -- which is set whenever - * it probably isn't safe to do anything to the card. The receive routine - * must gain a lock on "busy" before it can start a DMA transfer, and the - * transmit routine must gain a lock before it sends the first PCB to the card. - * The send_pcb() routine also has an internal semaphore to protect it against - * being re-entered (which would be disastrous) -- this is needed because - * several things can happen asynchronously (re-priming the receiver and - * asking the card for statistics, for example). send_pcb() will also refuse - * to talk to the card at all if a DMA upload is happening. The higher-level - * networking code will reschedule a later retry if some part of the driver - * is blocked. In practice, this doesn't seem to happen very often. - */ - -/* This driver may now work with revision 2.x hardware, since all the read - * operations on the HCR have been removed (we now keep our own softcopy). - * But I don't have an old card to test it on. - * - * This has had the bad effect that the autoprobe routine is now a bit - * less friendly to other devices. However, it was never very good. - * before, so I doubt it will hurt anybody. - */ - -/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly - * to make it more reliable, and secondly to add DMA mode. Many things could - * probably be done better; the concurrency protection is particularly awful. 
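
The "backlogged receive packets" described in this theory-of-operation comment are held in a small ring advanced with exactly the modular step that backlog_next() below performs. A standalone sketch of such a ring; the size and the stored lengths are illustrative, not the driver's actual data structure:

#include <stdio.h>

#define BACKLOG_SIZE 4

static int backlog[BACKLOG_SIZE];       /* e.g. lengths of packets awaiting DMA upload */
static unsigned int in, out, count;

static unsigned int backlog_next(unsigned int n)
{
        return (n + 1) % BACKLOG_SIZE;
}

static int backlog_push(int len)
{
        if (count == BACKLOG_SIZE)
                return -1;              /* ring full: nothing sane left to do */
        backlog[in] = len;
        in = backlog_next(in);
        count++;
        return 0;
}

static int backlog_pop(void)
{
        int len;

        if (!count)
                return -1;
        len = backlog[out];
        out = backlog_next(out);
        count--;
        return len;
}

int main(void)
{
        int first, second;

        backlog_push(64);
        backlog_push(1514);
        first = backlog_pop();
        second = backlog_pop();
        printf("upload %d then %d bytes\n", first, second);
        return 0;
}
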
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/in.h> -#include <linux/ioport.h> -#include <linux/spinlock.h> -#include <linux/ethtool.h> -#include <linux/delay.h> -#include <linux/bitops.h> -#include <linux/gfp.h> - -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/dma.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/init.h> - -#include "3c505.h" - -/********************************************************* - * - * define debug messages here as common strings to reduce space - * - *********************************************************/ - -#define timeout_msg "*** timeout at %s:%s (line %d) ***\n" -#define TIMEOUT_MSG(lineno) \ - pr_notice(timeout_msg, __FILE__, __func__, (lineno)) - -#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n" -#define INVALID_PCB_MSG(len) \ - pr_notice(invalid_pcb_msg, (len), __FILE__, __func__, __LINE__) - -#define search_msg "%s: Looking for 3c505 adapter at address %#x..." - -#define stilllooking_msg "still looking..." - -#define found_msg "found.\n" - -#define notfound_msg "not found (reason = %d)\n" - -#define couldnot_msg "%s: 3c505 not found\n" - -/********************************************************* - * - * various other debug stuff - * - *********************************************************/ - -#ifdef ELP_DEBUG -static int elp_debug = ELP_DEBUG; -#else -static int elp_debug; -#endif -#define debug elp_debug - -/* - * 0 = no messages (well, some) - * 1 = messages when high level commands performed - * 2 = messages when low level commands performed - * 3 = messages when interrupts received - */ - -/***************************************************************** - * - * List of I/O-addresses we try to auto-sense - * Last element MUST BE 0! - *****************************************************************/ - -static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0}; - -/* Dma Memory related stuff */ - -static unsigned long dma_mem_alloc(int size) -{ - int order = get_order(size); - return __get_dma_pages(GFP_KERNEL, order); -} - - -/***************************************************************** - * - * Functions for I/O (note the inline !) 
- * - *****************************************************************/ - -static inline unsigned char inb_status(unsigned int base_addr) -{ - return inb(base_addr + PORT_STATUS); -} - -static inline int inb_command(unsigned int base_addr) -{ - return inb(base_addr + PORT_COMMAND); -} - -static inline void outb_control(unsigned char val, struct net_device *dev) -{ - outb(val, dev->base_addr + PORT_CONTROL); - ((elp_device *)(netdev_priv(dev)))->hcr_val = val; -} - -#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val) - -static inline void outb_command(unsigned char val, unsigned int base_addr) -{ - outb(val, base_addr + PORT_COMMAND); -} - -static inline unsigned int backlog_next(unsigned int n) -{ - return (n + 1) % BACKLOG_SIZE; -} - -/***************************************************************** - * - * useful functions for accessing the adapter - * - *****************************************************************/ - -/* - * use this routine when accessing the ASF bits as they are - * changed asynchronously by the adapter - */ - -/* get adapter PCB status */ -#define GET_ASF(addr) \ - (get_status(addr)&ASF_PCB_MASK) - -static inline int get_status(unsigned int base_addr) -{ - unsigned long timeout = jiffies + 10*HZ/100; - register int stat1; - do { - stat1 = inb_status(base_addr); - } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - return stat1; -} - -static inline void set_hsf(struct net_device *dev, int hsf) -{ - elp_device *adapter = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&adapter->lock, flags); - outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev); - spin_unlock_irqrestore(&adapter->lock, flags); -} - -static bool start_receive(struct net_device *, pcb_struct *); - -static inline void adapter_reset(struct net_device *dev) -{ - unsigned long timeout; - elp_device *adapter = netdev_priv(dev); - unsigned char orig_hcr = adapter->hcr_val; - - outb_control(0, dev); - - if (inb_status(dev->base_addr) & ACRF) { - do { - inb_command(dev->base_addr); - timeout = jiffies + 2*HZ/100; - while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF)); - } while (inb_status(dev->base_addr) & ACRF); - set_hsf(dev, HSF_PCB_NAK); - } - outb_control(adapter->hcr_val | ATTN | DIR, dev); - mdelay(10); - outb_control(adapter->hcr_val & ~ATTN, dev); - mdelay(10); - outb_control(adapter->hcr_val | FLSH, dev); - mdelay(10); - outb_control(adapter->hcr_val & ~FLSH, dev); - mdelay(10); - - outb_control(orig_hcr, dev); - if (!start_receive(dev, &adapter->tx_pcb)) - pr_err("%s: start receive command failed\n", dev->name); -} - -/* Check to make sure that a DMA transfer hasn't timed out. This should - * never happen in theory, but seems to occur occasionally if the card gets - * prodded at the wrong time. - */ -static inline void check_3c505_dma(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) { - unsigned long flags, f; - pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name, - adapter->current_dma.direction ? 
"download" : "upload", - get_dma_residue(dev->dma)); - spin_lock_irqsave(&adapter->lock, flags); - adapter->dmaing = 0; - adapter->busy = 0; - - f=claim_dma_lock(); - disable_dma(dev->dma); - release_dma_lock(f); - - if (adapter->rx_active) - adapter->rx_active--; - outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); - spin_unlock_irqrestore(&adapter->lock, flags); - } -} - -/* Primitive functions used by send_pcb() */ -static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte) -{ - unsigned long timeout; - outb_command(byte, base_addr); - for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { - if (inb_status(base_addr) & HCRE) - return false; - } - pr_warning("3c505: send_pcb_slow timed out\n"); - return true; -} - -static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte) -{ - unsigned int timeout; - outb_command(byte, base_addr); - for (timeout = 0; timeout < 40000; timeout++) { - if (inb_status(base_addr) & HCRE) - return false; - } - pr_warning("3c505: send_pcb_fast timed out\n"); - return true; -} - -/* Check to see if the receiver needs restarting, and kick it if so */ -static inline void prime_rx(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) { - if (!start_receive(dev, &adapter->itx_pcb)) - break; - } -} - -/***************************************************************** - * - * send_pcb - * Send a PCB to the adapter. - * - * output byte to command reg --<--+ - * wait until HCRE is non zero | - * loop until all bytes sent -->--+ - * set HSF1 and HSF2 to 1 - * output pcb length - * wait until ASF give ACK or NAK - * set HSF1 and HSF2 to 0 - * - *****************************************************************/ - -/* This can be quite slow -- the adapter is allowed to take up to 40ms - * to respond to the initial interrupt. - * - * We run initially with interrupts turned on, but with a semaphore set - * so that nobody tries to re-enter this code. Once the first byte has - * gone through, we turn interrupts off and then send the others (the - * timeout is reduced to 500us). 
- */ - -static bool send_pcb(struct net_device *dev, pcb_struct * pcb) -{ - int i; - unsigned long timeout; - elp_device *adapter = netdev_priv(dev); - unsigned long flags; - - check_3c505_dma(dev); - - if (adapter->dmaing && adapter->current_dma.direction == 0) - return false; - - /* Avoid contention */ - if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) { - if (elp_debug >= 3) { - pr_debug("%s: send_pcb entered while threaded\n", dev->name); - } - return false; - } - /* - * load each byte into the command register and - * wait for the HCRE bit to indicate the adapter - * had read the byte - */ - set_hsf(dev, 0); - - if (send_pcb_slow(dev->base_addr, pcb->command)) - goto abort; - - spin_lock_irqsave(&adapter->lock, flags); - - if (send_pcb_fast(dev->base_addr, pcb->length)) - goto sti_abort; - - for (i = 0; i < pcb->length; i++) { - if (send_pcb_fast(dev->base_addr, pcb->data.raw[i])) - goto sti_abort; - } - - outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */ - outb_command(2 + pcb->length, dev->base_addr); - - /* now wait for the acknowledgement */ - spin_unlock_irqrestore(&adapter->lock, flags); - - for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { - switch (GET_ASF(dev->base_addr)) { - case ASF_PCB_ACK: - adapter->send_pcb_semaphore = 0; - return true; - - case ASF_PCB_NAK: -#ifdef ELP_DEBUG - pr_debug("%s: send_pcb got NAK\n", dev->name); -#endif - goto abort; - } - } - - if (elp_debug >= 1) - pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n", - dev->name, inb_status(dev->base_addr)); - goto abort; - - sti_abort: - spin_unlock_irqrestore(&adapter->lock, flags); - abort: - adapter->send_pcb_semaphore = 0; - return false; -} - - -/***************************************************************** - * - * receive_pcb - * Read a PCB from the adapter - * - * wait for ACRF to be non-zero ---<---+ - * input a byte | - * if ASF1 and ASF2 were not both one | - * before byte was read, loop --->---+ - * set HSF1 and HSF2 for ack - * - *****************************************************************/ - -static bool receive_pcb(struct net_device *dev, pcb_struct * pcb) -{ - int i, j; - int total_length; - int stat; - unsigned long timeout; - unsigned long flags; - - elp_device *adapter = netdev_priv(dev); - - set_hsf(dev, 0); - - /* get the command code */ - timeout = jiffies + 2*HZ/100; - while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - return false; - } - pcb->command = inb_command(dev->base_addr); - - /* read the data length */ - timeout = jiffies + 3*HZ/100; - while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - pr_info("%s: status %02x\n", dev->name, stat); - return false; - } - pcb->length = inb_command(dev->base_addr); - - if (pcb->length > MAX_PCB_DATA) { - INVALID_PCB_MSG(pcb->length); - adapter_reset(dev); - return false; - } - /* read the data */ - spin_lock_irqsave(&adapter->lock, flags); - for (i = 0; i < MAX_PCB_DATA; i++) { - for (j = 0; j < 20000; j++) { - stat = get_status(dev->base_addr); - if (stat & ACRF) - break; - } - pcb->data.raw[i] = inb_command(dev->base_addr); - if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000) - break; - } - spin_unlock_irqrestore(&adapter->lock, flags); - if (i >= MAX_PCB_DATA) { - INVALID_PCB_MSG(i); - return false; - } - if (j >= 20000) { - TIMEOUT_MSG(__LINE__); - return 
false; - } - /* the last "data" byte was really the length! */ - total_length = pcb->data.raw[i]; - - /* safety check total length vs data length */ - if (total_length != (pcb->length + 2)) { - if (elp_debug >= 2) - pr_warning("%s: mangled PCB received\n", dev->name); - set_hsf(dev, HSF_PCB_NAK); - return false; - } - - if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) { - if (test_and_set_bit(0, (void *) &adapter->busy)) { - if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) { - set_hsf(dev, HSF_PCB_NAK); - pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name); - pcb->command = 0; - return true; - } else { - pcb->command = 0xff; - } - } - } - set_hsf(dev, HSF_PCB_ACK); - return true; -} - -/****************************************************** - * - * queue a receive command on the adapter so we will get an - * interrupt when a packet is received. - * - ******************************************************/ - -static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb) -{ - bool status; - elp_device *adapter = netdev_priv(dev); - - if (elp_debug >= 3) - pr_debug("%s: restarting receiver\n", dev->name); - tx_pcb->command = CMD_RECEIVE_PACKET; - tx_pcb->length = sizeof(struct Rcv_pkt); - tx_pcb->data.rcv_pkt.buf_seg - = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */ - tx_pcb->data.rcv_pkt.buf_len = 1600; - tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */ - status = send_pcb(dev, tx_pcb); - if (status) - adapter->rx_active++; - return status; -} - -/****************************************************** - * - * extract a packet from the adapter - * this routine is only called from within the interrupt - * service routine, so no cli/sti calls are needed - * note that the length is always assumed to be even - * - ******************************************************/ - -static void receive_packet(struct net_device *dev, int len) -{ - int rlen; - elp_device *adapter = netdev_priv(dev); - void *target; - struct sk_buff *skb; - unsigned long flags; - - rlen = (len + 1) & ~1; - skb = netdev_alloc_skb(dev, rlen + 2); - - if (!skb) { - pr_warning("%s: memory squeeze, dropping packet\n", dev->name); - target = adapter->dma_buffer; - adapter->current_dma.target = NULL; - /* FIXME: stats */ - return; - } - - skb_reserve(skb, 2); - target = skb_put(skb, rlen); - if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) { - adapter->current_dma.target = target; - target = adapter->dma_buffer; - } else { - adapter->current_dma.target = NULL; - } - - /* if this happens, we die */ - if (test_and_set_bit(0, (void *) &adapter->dmaing)) - pr_err("%s: rx blocked, DMA in progress, dir %d\n", - dev->name, adapter->current_dma.direction); - - adapter->current_dma.direction = 0; - adapter->current_dma.length = rlen; - adapter->current_dma.skb = skb; - adapter->current_dma.start_time = jiffies; - - outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev); - - flags=claim_dma_lock(); - disable_dma(dev->dma); - clear_dma_ff(dev->dma); - set_dma_mode(dev->dma, 0x04); /* dma read */ - set_dma_addr(dev->dma, isa_virt_to_bus(target)); - set_dma_count(dev->dma, rlen); - enable_dma(dev->dma); - release_dma_lock(flags); - - if (elp_debug >= 3) { - pr_debug("%s: rx DMA transfer started\n", dev->name); - } - - if (adapter->rx_active) - adapter->rx_active--; - - if (!adapter->busy) - pr_warning("%s: receive_packet called, busy not set.\n", dev->name); -} - -/****************************************************** - * - * interrupt handler - * - 
******************************************************/ - -static irqreturn_t elp_interrupt(int irq, void *dev_id) -{ - int len; - int dlen; - int icount = 0; - struct net_device *dev = dev_id; - elp_device *adapter = netdev_priv(dev); - unsigned long timeout; - - spin_lock(&adapter->lock); - - do { - /* - * has a DMA transfer finished? - */ - if (inb_status(dev->base_addr) & DONE) { - if (!adapter->dmaing) - pr_warning("%s: phantom DMA completed\n", dev->name); - - if (elp_debug >= 3) - pr_debug("%s: %s DMA complete, status %02x\n", dev->name, - adapter->current_dma.direction ? "tx" : "rx", - inb_status(dev->base_addr)); - - outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); - if (adapter->current_dma.direction) { - dev_kfree_skb_irq(adapter->current_dma.skb); - } else { - struct sk_buff *skb = adapter->current_dma.skb; - if (skb) { - if (adapter->current_dma.target) { - /* have already done the skb_put() */ - memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length); - } - skb->protocol = eth_type_trans(skb,dev); - dev->stats.rx_bytes += skb->len; - netif_rx(skb); - } - } - adapter->dmaing = 0; - if (adapter->rx_backlog.in != adapter->rx_backlog.out) { - int t = adapter->rx_backlog.length[adapter->rx_backlog.out]; - adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out); - if (elp_debug >= 2) - pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t); - receive_packet(dev, t); - } else { - adapter->busy = 0; - } - } else { - /* has one timed out? */ - check_3c505_dma(dev); - } - - /* - * receive a PCB from the adapter - */ - timeout = jiffies + 3*HZ/100; - while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) { - if (receive_pcb(dev, &adapter->irx_pcb)) { - switch (adapter->irx_pcb.command) - { - case 0: - break; - /* - * received a packet - this must be handled fast - */ - case 0xff: - case CMD_RECEIVE_PACKET_COMPLETE: - /* if the device isn't open, don't pass packets up the stack */ - if (!netif_running(dev)) - break; - len = adapter->irx_pcb.data.rcv_resp.pkt_len; - dlen = adapter->irx_pcb.data.rcv_resp.buf_len; - if (adapter->irx_pcb.data.rcv_resp.timeout != 0) { - pr_err("%s: interrupt - packet not received correctly\n", dev->name); - } else { - if (elp_debug >= 3) { - pr_debug("%s: interrupt - packet received of length %i (%i)\n", - dev->name, len, dlen); - } - if (adapter->irx_pcb.command == 0xff) { - if (elp_debug >= 2) - pr_debug("%s: adding packet to backlog (len = %d)\n", - dev->name, dlen); - adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen; - adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in); - } else { - receive_packet(dev, dlen); - } - if (elp_debug >= 3) - pr_debug("%s: packet received\n", dev->name); - } - break; - - /* - * 82586 configured correctly - */ - case CMD_CONFIGURE_82586_RESPONSE: - adapter->got[CMD_CONFIGURE_82586] = 1; - if (elp_debug >= 3) - pr_debug("%s: interrupt - configure response received\n", dev->name); - break; - - /* - * Adapter memory configuration - */ - case CMD_CONFIGURE_ADAPTER_RESPONSE: - adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1; - if (elp_debug >= 3) - pr_debug("%s: Adapter memory configuration %s.\n", dev->name, - adapter->irx_pcb.data.failed ? "failed" : "succeeded"); - break; - - /* - * Multicast list loading - */ - case CMD_LOAD_MULTICAST_RESPONSE: - adapter->got[CMD_LOAD_MULTICAST_LIST] = 1; - if (elp_debug >= 3) - pr_debug("%s: Multicast address list loading %s.\n", dev->name, - adapter->irx_pcb.data.failed ? 
"failed" : "succeeded"); - break; - - /* - * Station address setting - */ - case CMD_SET_ADDRESS_RESPONSE: - adapter->got[CMD_SET_STATION_ADDRESS] = 1; - if (elp_debug >= 3) - pr_debug("%s: Ethernet address setting %s.\n", dev->name, - adapter->irx_pcb.data.failed ? "failed" : "succeeded"); - break; - - - /* - * received board statistics - */ - case CMD_NETWORK_STATISTICS_RESPONSE: - dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv; - dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit; - dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC; - dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align; - dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun; - dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res; - adapter->got[CMD_NETWORK_STATISTICS] = 1; - if (elp_debug >= 3) - pr_debug("%s: interrupt - statistics response received\n", dev->name); - break; - - /* - * sent a packet - */ - case CMD_TRANSMIT_PACKET_COMPLETE: - if (elp_debug >= 3) - pr_debug("%s: interrupt - packet sent\n", dev->name); - if (!netif_running(dev)) - break; - switch (adapter->irx_pcb.data.xmit_resp.c_stat) { - case 0xffff: - dev->stats.tx_aborted_errors++; - pr_info("%s: transmit timed out, network cable problem?\n", dev->name); - break; - case 0xfffe: - dev->stats.tx_fifo_errors++; - pr_info("%s: transmit timed out, FIFO underrun\n", dev->name); - break; - } - netif_wake_queue(dev); - break; - - /* - * some unknown PCB - */ - default: - pr_debug("%s: unknown PCB received - %2.2x\n", - dev->name, adapter->irx_pcb.command); - break; - } - } else { - pr_warning("%s: failed to read PCB on interrupt\n", dev->name); - adapter_reset(dev); - } - } - - } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE))); - - prime_rx(dev); - - /* - * indicate no longer in interrupt routine - */ - spin_unlock(&adapter->lock); - return IRQ_HANDLED; -} - - -/****************************************************** - * - * open the board - * - ******************************************************/ - -static int elp_open(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - int retval; - - if (elp_debug >= 3) - pr_debug("%s: request to open device\n", dev->name); - - /* - * make sure we actually found the device - */ - if (adapter == NULL) { - pr_err("%s: Opening a non-existent physical device\n", dev->name); - return -EAGAIN; - } - /* - * disable interrupts on the board - */ - outb_control(0, dev); - - /* - * clear any pending interrupts - */ - inb_command(dev->base_addr); - adapter_reset(dev); - - /* - * no receive PCBs active - */ - adapter->rx_active = 0; - - adapter->busy = 0; - adapter->send_pcb_semaphore = 0; - adapter->rx_backlog.in = 0; - adapter->rx_backlog.out = 0; - - spin_lock_init(&adapter->lock); - - /* - * install our interrupt service routine - */ - if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) { - pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq); - return retval; - } - if ((retval = request_dma(dev->dma, dev->name))) { - free_irq(dev->irq, dev); - pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma); - return retval; - } - adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE); - if (!adapter->dma_buffer) { - pr_err("%s: could not allocate DMA buffer\n", dev->name); - free_dma(dev->dma); - free_irq(dev->irq, dev); - return -ENOMEM; - } - adapter->dmaing = 0; - - /* - * enable interrupts on the board - */ - outb_control(CMDE, dev); - - /* - * 
configure adapter memory: we need 10 multicast addresses, default==0 - */ - if (elp_debug >= 3) - pr_debug("%s: sending 3c505 memory configuration command\n", dev->name); - adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; - adapter->tx_pcb.data.memconf.cmd_q = 10; - adapter->tx_pcb.data.memconf.rcv_q = 20; - adapter->tx_pcb.data.memconf.mcast = 10; - adapter->tx_pcb.data.memconf.frame = 20; - adapter->tx_pcb.data.memconf.rcv_b = 20; - adapter->tx_pcb.data.memconf.progs = 0; - adapter->tx_pcb.length = sizeof(struct Memconf); - adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send memory configuration command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - } - - - /* - * configure adapter to receive broadcast messages and wait for response - */ - if (elp_debug >= 3) - pr_debug("%s: sending 82586 configure command\n", dev->name); - adapter->tx_pcb.command = CMD_CONFIGURE_82586; - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; - adapter->tx_pcb.length = 2; - adapter->got[CMD_CONFIGURE_82586] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send 82586 configure command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - } - - /* enable burst-mode DMA */ - /* outb(0x1, dev->base_addr + PORT_AUXDMA); */ - - /* - * queue receive commands to provide buffering - */ - prime_rx(dev); - if (elp_debug >= 3) - pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active); - - /* - * device is now officially open! - */ - - netif_start_queue(dev); - return 0; -} - - -/****************************************************** - * - * send a packet to the adapter - * - ******************************************************/ - -static netdev_tx_t send_packet(struct net_device *dev, struct sk_buff *skb) -{ - elp_device *adapter = netdev_priv(dev); - unsigned long target; - unsigned long flags; - - /* - * make sure the length is even and no shorter than 60 bytes - */ - unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1); - - if (test_and_set_bit(0, (void *) &adapter->busy)) { - if (elp_debug >= 2) - pr_debug("%s: transmit blocked\n", dev->name); - return false; - } - - dev->stats.tx_bytes += nlen; - - /* - * send the adapter a transmit packet command. 
Ignore segment and offset - * and make sure the length is even - */ - adapter->tx_pcb.command = CMD_TRANSMIT_PACKET; - adapter->tx_pcb.length = sizeof(struct Xmit_pkt); - adapter->tx_pcb.data.xmit_pkt.buf_ofs - = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */ - adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen; - - if (!send_pcb(dev, &adapter->tx_pcb)) { - adapter->busy = 0; - return false; - } - /* if this happens, we die */ - if (test_and_set_bit(0, (void *) &adapter->dmaing)) - pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction); - - adapter->current_dma.direction = 1; - adapter->current_dma.start_time = jiffies; - - if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { - skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen); - memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); - target = isa_virt_to_bus(adapter->dma_buffer); - } - else { - target = isa_virt_to_bus(skb->data); - } - adapter->current_dma.skb = skb; - - flags=claim_dma_lock(); - disable_dma(dev->dma); - clear_dma_ff(dev->dma); - set_dma_mode(dev->dma, 0x48); /* dma memory -> io */ - set_dma_addr(dev->dma, target); - set_dma_count(dev->dma, nlen); - outb_control(adapter->hcr_val | DMAE | TCEN, dev); - enable_dma(dev->dma); - release_dma_lock(flags); - - if (elp_debug >= 3) - pr_debug("%s: DMA transfer started\n", dev->name); - - return true; -} - -/* - * The upper layer thinks we timed out - */ - -static void elp_timeout(struct net_device *dev) -{ - int stat; - - stat = inb_status(dev->base_addr); - pr_warning("%s: transmit timed out, lost %s?\n", dev->name, - (stat & ACRF) ? "interrupt" : "command"); - if (elp_debug >= 1) - pr_debug("%s: status %#02x\n", dev->name, stat); - dev->trans_start = jiffies; /* prevent tx timeout */ - dev->stats.tx_dropped++; - netif_wake_queue(dev); -} - -/****************************************************** - * - * start the transmitter - * return 0 if sent OK, else return 1 - * - ******************************************************/ - -static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - unsigned long flags; - elp_device *adapter = netdev_priv(dev); - - spin_lock_irqsave(&adapter->lock, flags); - check_3c505_dma(dev); - - if (elp_debug >= 3) - pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len); - - netif_stop_queue(dev); - - /* - * send the packet at skb->data for skb->len - */ - if (!send_packet(dev, skb)) { - if (elp_debug >= 2) { - pr_debug("%s: failed to transmit packet\n", dev->name); - } - spin_unlock_irqrestore(&adapter->lock, flags); - return NETDEV_TX_BUSY; - } - if (elp_debug >= 3) - pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len); - - prime_rx(dev); - spin_unlock_irqrestore(&adapter->lock, flags); - netif_start_queue(dev); - return NETDEV_TX_OK; -} - -/****************************************************** - * - * return statistics on the board - * - ******************************************************/ - -static struct net_device_stats *elp_get_stats(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - - if (elp_debug >= 3) - pr_debug("%s: request for stats\n", dev->name); - - /* If the device is closed, just return the latest stats we have, - - we cannot ask from the adapter without interrupts */ - if (!netif_running(dev)) - return &dev->stats; - - /* send a get statistics command to the board */ - adapter->tx_pcb.command = CMD_NETWORK_STATISTICS; - adapter->tx_pcb.length = 0; - 
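/*
 * The lines below repeat a pattern used throughout this driver (see
 * elp_open() and elp_set_mc_list()): clear the matching got[] flag, send the
 * PCB, then busy-wait until the interrupt handler sets the flag or a jiffies
 * deadline passes.  Distilled into one hypothetical helper, assuming
 * adapter->tx_pcb has already been filled in by the caller:
 */
static bool elp_issue_and_wait(struct net_device *dev, int cmd)
{
	elp_device *adapter = netdev_priv(dev);
	unsigned long deadline = jiffies + TIMEOUT;

	adapter->got[cmd] = 0;
	if (!send_pcb(dev, &adapter->tx_pcb))
		return false;
	/* spin until elp_interrupt() records the matching response */
	while (adapter->got[cmd] == 0 && time_before(jiffies, deadline));
	return adapter->got[cmd] != 0;
}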
adapter->got[CMD_NETWORK_STATISTICS] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send get statistics command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - return &dev->stats; - } - } - - /* statistics are now up to date */ - return &dev->stats; -} - - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - -/****************************************************** - * - * close the board - * - ******************************************************/ - -static int elp_close(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - - if (elp_debug >= 3) - pr_debug("%s: request to close device\n", dev->name); - - netif_stop_queue(dev); - - /* Someone may request the device statistic information even when - * the interface is closed. The following will update the statistics - * structure in the driver, so we'll be able to give current statistics. - */ - (void) elp_get_stats(dev); - - /* - * disable interrupts on the board - */ - outb_control(0, dev); - - /* - * release the IRQ - */ - free_irq(dev->irq, dev); - - free_dma(dev->dma); - free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE)); - - return 0; -} - - -/************************************************************ - * - * Set multicast list - * num_addrs==0: clear mc_list - * num_addrs==-1: set promiscuous mode - * num_addrs>0: set mc_list - * - ************************************************************/ - -static void elp_set_mc_list(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - struct netdev_hw_addr *ha; - int i; - unsigned long flags; - - if (elp_debug >= 3) - pr_debug("%s: request to set multicast list\n", dev->name); - - spin_lock_irqsave(&adapter->lock, flags); - - if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { - /* send a "load multicast list" command to the board, max 10 addrs/cmd */ - /* if num_addrs==0 the list will be cleared */ - adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; - adapter->tx_pcb.length = 6 * netdev_mc_count(dev); - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy(adapter->tx_pcb.data.multicast[i++], - ha->addr, 6); - adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send set_multicast command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - } - } - if (!netdev_mc_empty(dev)) - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI; - else /* num_addrs == 0 */ - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; - } else - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC; - /* - * configure adapter to receive messages (as specified above) - * and wait for response - */ - if 
(elp_debug >= 3) - pr_debug("%s: sending 82586 configure command\n", dev->name); - adapter->tx_pcb.command = CMD_CONFIGURE_82586; - adapter->tx_pcb.length = 2; - adapter->got[CMD_CONFIGURE_82586] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - { - spin_unlock_irqrestore(&adapter->lock, flags); - pr_err("%s: couldn't send 82586 configure command\n", dev->name); - } - else { - unsigned long timeout = jiffies + TIMEOUT; - spin_unlock_irqrestore(&adapter->lock, flags); - while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - } -} - -/************************************************************ - * - * A couple of tests to see if there's 3C505 or not - * Called only by elp_autodetect - ************************************************************/ - -static int __init elp_sense(struct net_device *dev) -{ - int addr = dev->base_addr; - const char *name = dev->name; - byte orig_HSR; - - if (!request_region(addr, ELP_IO_EXTENT, "3c505")) - return -ENODEV; - - orig_HSR = inb_status(addr); - - if (elp_debug > 0) - pr_debug(search_msg, name, addr); - - if (orig_HSR == 0xff) { - if (elp_debug > 0) - pr_cont(notfound_msg, 1); - goto out; - } - - /* Wait for a while; the adapter may still be booting up */ - if (elp_debug > 0) - pr_cont(stilllooking_msg); - - if (orig_HSR & DIR) { - /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */ - outb(0, dev->base_addr + PORT_CONTROL); - msleep(300); - if (inb_status(addr) & DIR) { - if (elp_debug > 0) - pr_cont(notfound_msg, 2); - goto out; - } - } else { - /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */ - outb(DIR, dev->base_addr + PORT_CONTROL); - msleep(300); - if (!(inb_status(addr) & DIR)) { - if (elp_debug > 0) - pr_cont(notfound_msg, 3); - goto out; - } - } - /* - * It certainly looks like a 3c505. 
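/*
 * The detection logic above relies on one property of the hardware: whatever
 * we write to HCR.DIR is mirrored back in HSR.DIR after a short delay, which
 * is how the probe tells a 3c505 from an unrelated card answering at the
 * same port.  A condensed sketch of that single check; dir_mirrors() is a
 * hypothetical name, elp_sense() above is the real implementation.
 */
static bool dir_mirrors(int addr, bool want_dir)
{
	outb(want_dir ? DIR : 0, addr + PORT_CONTROL);
	msleep(300);				/* give the adapter time to follow */
	return !!(inb(addr + PORT_STATUS) & DIR) == want_dir;
}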
- */ - if (elp_debug > 0) - pr_cont(found_msg); - - return 0; -out: - release_region(addr, ELP_IO_EXTENT); - return -ENODEV; -} - -/************************************************************* - * - * Search through addr_list[] and try to find a 3C505 - * Called only by eplus_probe - *************************************************************/ - -static int __init elp_autodetect(struct net_device *dev) -{ - int idx = 0; - - /* if base address set, then only check that address - otherwise, run through the table */ - if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */ - if (elp_sense(dev) == 0) - return dev->base_addr; - } else - while ((dev->base_addr = addr_list[idx++])) { - if (elp_sense(dev) == 0) - return dev->base_addr; - } - - /* could not find an adapter */ - if (elp_debug > 0) - pr_debug(couldnot_msg, dev->name); - - return 0; /* Because of this, the layer above will return -ENODEV */ -} - -static const struct net_device_ops elp_netdev_ops = { - .ndo_open = elp_open, - .ndo_stop = elp_close, - .ndo_get_stats = elp_get_stats, - .ndo_start_xmit = elp_start_xmit, - .ndo_tx_timeout = elp_timeout, - .ndo_set_rx_mode = elp_set_mc_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/****************************************************** - * - * probe for an Etherlink Plus board at the specified address - * - ******************************************************/ - -/* There are three situations we need to be able to detect here: - - * a) the card is idle - * b) the card is still booting up - * c) the card is stuck in a strange state (some DOS drivers do this) - * - * In case (a), all is well. In case (b), we wait 10 seconds to see if the - * card finishes booting, and carry on if so. In case (c), we do a hard reset, - * loop round, and hope for the best. - * - * This is all very unpleasant, but hopefully avoids the problems with the old - * probe code (which had a 15-second delay if the card was idle, and didn't - * work at all if it was in a weird state). - */ - -static int __init elplus_setup(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - int i, tries, tries1, okay; - unsigned long timeout; - unsigned long cookie = 0; - int err = -ENODEV; - - /* - * setup adapter structure - */ - - dev->base_addr = elp_autodetect(dev); - if (!dev->base_addr) - return -ENODEV; - - adapter->send_pcb_semaphore = 0; - - for (tries1 = 0; tries1 < 3; tries1++) { - outb_control((adapter->hcr_val | CMDE) & ~DIR, dev); - /* First try to write just one byte, to see if the card is - * responding at all normally. - */ - timeout = jiffies + 5*HZ/100; - okay = 0; - while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); - if ((inb_status(dev->base_addr) & HCRE)) { - outb_command(0, dev->base_addr); /* send a spurious byte */ - timeout = jiffies + 5*HZ/100; - while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); - if (inb_status(dev->base_addr) & HCRE) - okay = 1; - } - if (!okay) { - /* Nope, it's ignoring the command register. This means that - * either it's still booting up, or it's died. - */ - pr_err("%s: command register wouldn't drain, ", dev->name); - if ((inb_status(dev->base_addr) & 7) == 3) { - /* If the adapter status is 3, it *could* still be booting. - * Give it the benefit of the doubt for 10 seconds. 
- */ - pr_cont("assuming 3c505 still starting\n"); - timeout = jiffies + 10*HZ; - while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7)); - if (inb_status(dev->base_addr) & 7) { - pr_err("%s: 3c505 failed to start\n", dev->name); - } else { - okay = 1; /* It started */ - } - } else { - /* Otherwise, it must just be in a strange - * state. We probably need to kick it. - */ - pr_cont("3c505 is sulking\n"); - } - } - for (tries = 0; tries < 5 && okay; tries++) { - - /* - * Try to set the Ethernet address, to make sure that the board - * is working. - */ - adapter->tx_pcb.command = CMD_STATION_ADDRESS; - adapter->tx_pcb.length = 0; - cookie = probe_irq_on(); - if (!send_pcb(dev, &adapter->tx_pcb)) { - pr_err("%s: could not send first PCB\n", dev->name); - probe_irq_off(cookie); - continue; - } - if (!receive_pcb(dev, &adapter->rx_pcb)) { - pr_err("%s: could not read first PCB\n", dev->name); - probe_irq_off(cookie); - continue; - } - if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) || - (adapter->rx_pcb.length != 6)) { - pr_err("%s: first PCB wrong (%d, %d)\n", dev->name, - adapter->rx_pcb.command, adapter->rx_pcb.length); - probe_irq_off(cookie); - continue; - } - goto okay; - } - /* It's broken. Do a hard reset to re-initialise the board, - * and try again. - */ - pr_info("%s: resetting adapter\n", dev->name); - outb_control(adapter->hcr_val | FLSH | ATTN, dev); - outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev); - } - pr_err("%s: failed to initialise 3c505\n", dev->name); - goto out; - - okay: - if (dev->irq) { /* Is there a preset IRQ? */ - int rpt = probe_irq_off(cookie); - if (dev->irq != rpt) { - pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt); - } - /* if dev->irq == probe_irq_off(cookie), all is well */ - } else /* No preset IRQ; just use what we can detect */ - dev->irq = probe_irq_off(cookie); - switch (dev->irq) { /* Legal, sane? */ - case 0: - pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n", - dev->name); - goto out; - case 1: - case 6: - case 8: - case 13: - pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n", - dev->name, dev->irq); - goto out; - } - /* - * Now we have the IRQ number so we can disable the interrupts from - * the board until the board is opened. 
- */ - outb_control(adapter->hcr_val & ~CMDE, dev); - - /* - * copy Ethernet address into structure - */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i]; - - /* find a DMA channel */ - if (!dev->dma) { - if (dev->mem_start) { - dev->dma = dev->mem_start & 7; - } - else { - pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name); - dev->dma = ELP_DMA; - } - } - - /* - * print remainder of startup message - */ - pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ", - dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr); - /* - * read more information from the adapter - */ - - adapter->tx_pcb.command = CMD_ADAPTER_INFO; - adapter->tx_pcb.length = 0; - if (!send_pcb(dev, &adapter->tx_pcb) || - !receive_pcb(dev, &adapter->rx_pcb) || - (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) || - (adapter->rx_pcb.length != 10)) { - pr_cont("not responding to second PCB\n"); - } - pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, - adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz); - - /* - * reconfigure the adapter memory to better suit our purposes - */ - adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; - adapter->tx_pcb.length = 12; - adapter->tx_pcb.data.memconf.cmd_q = 8; - adapter->tx_pcb.data.memconf.rcv_q = 8; - adapter->tx_pcb.data.memconf.mcast = 10; - adapter->tx_pcb.data.memconf.frame = 10; - adapter->tx_pcb.data.memconf.rcv_b = 10; - adapter->tx_pcb.data.memconf.progs = 0; - if (!send_pcb(dev, &adapter->tx_pcb) || - !receive_pcb(dev, &adapter->rx_pcb) || - (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) || - (adapter->rx_pcb.length != 2)) { - pr_err("%s: could not configure adapter memory\n", dev->name); - } - if (adapter->rx_pcb.data.configure) { - pr_err("%s: adapter configuration failed\n", dev->name); - } - - dev->netdev_ops = &elp_netdev_ops; - dev->watchdog_timeo = 10*HZ; - dev->ethtool_ops = &netdev_ethtool_ops; /* local */ - - dev->mem_start = dev->mem_end = 0; - - err = register_netdev(dev); - if (err) - goto out; - - return 0; -out: - release_region(dev->base_addr, ELP_IO_EXTENT); - return err; -} - -#ifndef MODULE -struct net_device * __init elplus_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(elp_device)); - int err; - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = elplus_setup(dev); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } - return dev; -} - -#else -static struct net_device *dev_3c505[ELP_MAX_CARDS]; -static int io[ELP_MAX_CARDS]; -static int irq[ELP_MAX_CARDS]; -static int dma[ELP_MAX_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(dma, int, NULL, 0); -MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)"); -MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)"); - -int __init init_module(void) -{ - int this_dev, found = 0; - - for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { - struct net_device *dev = alloc_etherdev(sizeof(elp_device)); - if (!dev) - break; - - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (dma[this_dev]) { - dev->dma = dma[this_dev]; - } else { - dev->dma = ELP_DMA; - pr_warning("3c505.c: warning, using default DMA channel,\n"); - } - if (io[this_dev] == 0) { - if (this_dev) { - free_netdev(dev); - break; - } - pr_notice("3c505.c: module autoprobe not recommended, 
give io=xx.\n"); - } - if (elplus_setup(dev) != 0) { - pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]); - free_netdev(dev); - break; - } - dev_3c505[this_dev] = dev; - found++; - } - if (!found) - return -ENODEV; - return 0; -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { - struct net_device *dev = dev_3c505[this_dev]; - if (dev) { - unregister_netdev(dev); - release_region(dev->base_addr, ELP_IO_EXTENT); - free_netdev(dev); - } - } -} - -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/3c505.h b/drivers/net/ethernet/i825xx/3c505.h deleted file mode 100644 index 04df2a9002b6..000000000000 --- a/drivers/net/ethernet/i825xx/3c505.h +++ /dev/null @@ -1,292 +0,0 @@ -/***************************************************************** - * - * defines for 3Com Etherlink Plus adapter - * - *****************************************************************/ - -#define ELP_DMA 6 -#define ELP_RX_PCBS 4 -#define ELP_MAX_CARDS 4 - -/* - * I/O register offsets - */ -#define PORT_COMMAND 0x00 /* read/write, 8-bit */ -#define PORT_STATUS 0x02 /* read only, 8-bit */ -#define PORT_AUXDMA 0x02 /* write only, 8-bit */ -#define PORT_DATA 0x04 /* read/write, 16-bit */ -#define PORT_CONTROL 0x06 /* read/write, 8-bit */ - -#define ELP_IO_EXTENT 0x10 /* size of used IO registers */ - -/* - * host control registers bits - */ -#define ATTN 0x80 /* attention */ -#define FLSH 0x40 /* flush data register */ -#define DMAE 0x20 /* DMA enable */ -#define DIR 0x10 /* direction */ -#define TCEN 0x08 /* terminal count interrupt enable */ -#define CMDE 0x04 /* command register interrupt enable */ -#define HSF2 0x02 /* host status flag 2 */ -#define HSF1 0x01 /* host status flag 1 */ - -/* - * combinations of HSF flags used for PCB transmission - */ -#define HSF_PCB_ACK HSF1 -#define HSF_PCB_NAK HSF2 -#define HSF_PCB_END (HSF2|HSF1) -#define HSF_PCB_MASK (HSF2|HSF1) - -/* - * host status register bits - */ -#define HRDY 0x80 /* data register ready */ -#define HCRE 0x40 /* command register empty */ -#define ACRF 0x20 /* adapter command register full */ -/* #define DIR 0x10 direction - same as in control register */ -#define DONE 0x08 /* DMA done */ -#define ASF3 0x04 /* adapter status flag 3 */ -#define ASF2 0x02 /* adapter status flag 2 */ -#define ASF1 0x01 /* adapter status flag 1 */ - -/* - * combinations of ASF flags used for PCB reception - */ -#define ASF_PCB_ACK ASF1 -#define ASF_PCB_NAK ASF2 -#define ASF_PCB_END (ASF2|ASF1) -#define ASF_PCB_MASK (ASF2|ASF1) - -/* - * host aux DMA register bits - */ -#define DMA_BRST 0x01 /* DMA burst */ - -/* - * maximum amount of data allowed in a PCB - */ -#define MAX_PCB_DATA 62 - -/***************************************************************** - * - * timeout value - * this is a rough value used for loops to stop them from - * locking up the whole machine in the case of failure or - * error conditions - * - *****************************************************************/ - -#define TIMEOUT 300 - -/***************************************************************** - * - * PCB commands - * - *****************************************************************/ - -enum { - /* - * host PCB commands - */ - CMD_CONFIGURE_ADAPTER_MEMORY = 0x01, - CMD_CONFIGURE_82586 = 0x02, - CMD_STATION_ADDRESS = 0x03, - CMD_DMA_DOWNLOAD = 0x04, - CMD_DMA_UPLOAD = 0x05, - CMD_PIO_DOWNLOAD = 0x06, - CMD_PIO_UPLOAD = 0x07, - CMD_RECEIVE_PACKET = 0x08, - CMD_TRANSMIT_PACKET = 
0x09, - CMD_NETWORK_STATISTICS = 0x0a, - CMD_LOAD_MULTICAST_LIST = 0x0b, - CMD_CLEAR_PROGRAM = 0x0c, - CMD_DOWNLOAD_PROGRAM = 0x0d, - CMD_EXECUTE_PROGRAM = 0x0e, - CMD_SELF_TEST = 0x0f, - CMD_SET_STATION_ADDRESS = 0x10, - CMD_ADAPTER_INFO = 0x11, - NUM_TRANSMIT_CMDS, - - /* - * adapter PCB commands - */ - CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31, - CMD_CONFIGURE_82586_RESPONSE = 0x32, - CMD_ADDRESS_RESPONSE = 0x33, - CMD_DOWNLOAD_DATA_REQUEST = 0x34, - CMD_UPLOAD_DATA_REQUEST = 0x35, - CMD_RECEIVE_PACKET_COMPLETE = 0x38, - CMD_TRANSMIT_PACKET_COMPLETE = 0x39, - CMD_NETWORK_STATISTICS_RESPONSE = 0x3a, - CMD_LOAD_MULTICAST_RESPONSE = 0x3b, - CMD_CLEAR_PROGRAM_RESPONSE = 0x3c, - CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d, - CMD_EXECUTE_RESPONSE = 0x3e, - CMD_SELF_TEST_RESPONSE = 0x3f, - CMD_SET_ADDRESS_RESPONSE = 0x40, - CMD_ADAPTER_INFO_RESPONSE = 0x41 -}; - -/* Definitions for the PCB data structure */ - -/* Data units */ -typedef unsigned char byte; -typedef unsigned short int word; -typedef unsigned long int dword; - -/* Data structures */ -struct Memconf { - word cmd_q, - rcv_q, - mcast, - frame, - rcv_b, - progs; -}; - -struct Rcv_pkt { - word buf_ofs, - buf_seg, - buf_len, - timeout; -}; - -struct Xmit_pkt { - word buf_ofs, - buf_seg, - pkt_len; -}; - -struct Rcv_resp { - word buf_ofs, - buf_seg, - buf_len, - pkt_len, - timeout, - status; - dword timetag; -}; - -struct Xmit_resp { - word buf_ofs, - buf_seg, - c_stat, - status; -}; - - -struct Netstat { - dword tot_recv, - tot_xmit; - word err_CRC, - err_align, - err_res, - err_ovrrun; -}; - - -struct Selftest { - word error; - union { - word ROM_cksum; - struct { - word ofs, seg; - } RAM; - word i82586; - } failure; -}; - -struct Info { - byte minor_vers, - major_vers; - word ROM_cksum, - RAM_sz, - free_ofs, - free_seg; -}; - -struct Memdump { - word size, - off, - seg; -}; - -/* -Primary Command Block. The most important data structure. All communication -between the host and the adapter is done with these. (Except for the actual -Ethernet data, which has different packaging.) 
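/*
 * A typical exchange using these command codes, as elplus_setup() in 3c505.c
 * does it: the host fills in a pcb_struct (defined below), sends it, and the
 * adapter answers with the matching *_RESPONSE code.  Hedged sketch only;
 * query_adapter_info() is a hypothetical wrapper, not part of the driver.
 */
static bool query_adapter_info(struct net_device *dev)
{
	elp_device *adapter = netdev_priv(dev);

	adapter->tx_pcb.command = CMD_ADAPTER_INFO;	/* host -> adapter, 0x11 */
	adapter->tx_pcb.length = 0;			/* no parameters */

	if (!send_pcb(dev, &adapter->tx_pcb) ||
	    !receive_pcb(dev, &adapter->rx_pcb))
		return false;

	/* the reply carries command 0x41 and a 10-byte struct Info payload */
	return adapter->rx_pcb.command == CMD_ADAPTER_INFO_RESPONSE &&
	       adapter->rx_pcb.length == 10;
}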
-*/ -typedef struct { - byte command; - byte length; - union { - struct Memconf memconf; - word configure; - struct Rcv_pkt rcv_pkt; - struct Xmit_pkt xmit_pkt; - byte multicast[10][6]; - byte eth_addr[6]; - byte failed; - struct Rcv_resp rcv_resp; - struct Xmit_resp xmit_resp; - struct Netstat netstat; - struct Selftest selftest; - struct Info info; - struct Memdump memdump; - byte raw[62]; - } data; -} pcb_struct; - -/* These defines for 'configure' */ -#define RECV_STATION 0x00 -#define RECV_BROAD 0x01 -#define RECV_MULTI 0x02 -#define RECV_PROMISC 0x04 -#define NO_LOOPBACK 0x00 -#define INT_LOOPBACK 0x08 -#define EXT_LOOPBACK 0x10 - -/***************************************************************** - * - * structure to hold context information for adapter - * - *****************************************************************/ - -#define DMA_BUFFER_SIZE 1600 -#define BACKLOG_SIZE 4 - -typedef struct { - volatile short got[NUM_TRANSMIT_CMDS]; /* flags for - command completion */ - pcb_struct tx_pcb; /* PCB for foreground sending */ - pcb_struct rx_pcb; /* PCB for foreground receiving */ - pcb_struct itx_pcb; /* PCB for background sending */ - pcb_struct irx_pcb; /* PCB for background receiving */ - - void *dma_buffer; - - struct { - unsigned int length[BACKLOG_SIZE]; - unsigned int in; - unsigned int out; - } rx_backlog; - - struct { - unsigned int direction; - unsigned int length; - struct sk_buff *skb; - void *target; - unsigned long start_time; - } current_dma; - - /* flags */ - unsigned long send_pcb_semaphore; - unsigned long dmaing; - unsigned long busy; - - unsigned int rx_active; /* number of receive PCBs */ - volatile unsigned char hcr_val; /* what we think the HCR contains */ - spinlock_t lock; /* Interrupt v tx lock */ -} elp_device; diff --git a/drivers/net/ethernet/i825xx/3c507.c b/drivers/net/ethernet/i825xx/3c507.c deleted file mode 100644 index e8984b059905..000000000000 --- a/drivers/net/ethernet/i825xx/3c507.c +++ /dev/null @@ -1,938 +0,0 @@ -/* 3c507.c: An EtherLink16 device driver for Linux. */ -/* - Written 1993,1994 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - - Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings) - and jrs@world.std.com (Rick Sladkey) for testing and bugfixes. - Mark Salazar <leslie@access.digex.net> made the changes for cards with - only 16K packet buffers. - - Things remaining to do: - Verify that the tx and rx buffers don't have fencepost errors. - Move the theory of operation and memory map documentation. - The statistics need to be updated correctly. -*/ - -#define DRV_NAME "3c507" -#define DRV_VERSION "1.10a" -#define DRV_RELDATE "11/17/2001" - -static const char version[] = - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n"; - -/* - Sources: - This driver wouldn't have been written with the availability of the - Crynwr driver source code. It provided a known-working implementation - that filled in the gaping holes of the Intel documentation. Three cheers - for Russ Nelson. - - Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough - info that the casual reader might think that it documents the i82586 :-<. 
-*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/spinlock.h> -#include <linux/ethtool.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/if_ether.h> -#include <linux/skbuff.h> -#include <linux/init.h> -#include <linux/bitops.h> - -#include <asm/dma.h> -#include <asm/io.h> -#include <asm/uaccess.h> - -/* use 0 for production, 1 for verification, 2..7 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 1 -#endif -static unsigned int net_debug = NET_DEBUG; -#define debug net_debug - - -/* - Details of the i82586. - - You'll really need the databook to understand the details of this part, - but the outline is that the i82586 has two separate processing units. - Both are started from a list of three configuration tables, of which only - the last, the System Control Block (SCB), is used after reset-time. The SCB - has the following fields: - Status word - Command word - Tx/Command block addr. - Rx block addr. - The command word accepts the following controls for the Tx and Rx units: - */ - -#define CUC_START 0x0100 -#define CUC_RESUME 0x0200 -#define CUC_SUSPEND 0x0300 -#define RX_START 0x0010 -#define RX_RESUME 0x0020 -#define RX_SUSPEND 0x0030 - -/* The Rx unit uses a list of frame descriptors and a list of data buffer - descriptors. We use full-sized (1518 byte) data buffers, so there is - a one-to-one pairing of frame descriptors to buffer descriptors. - - The Tx ("command") unit executes a list of commands that look like: - Status word Written by the 82586 when the command is done. - Command word Command in lower 3 bits, post-command action in upper 3 - Link word The address of the next command. - Parameters (as needed). - - Some definitions related to the Command Word are: - */ -#define CMD_EOL 0x8000 /* The last command of the list, stop. */ -#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ -#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ - -enum commands { - CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, - CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7}; - -/* Information that need to be kept for each board. */ -struct net_local { - int last_restart; - ushort rx_head; - ushort rx_tail; - ushort tx_head; - ushort tx_cmd_link; - ushort tx_reap; - ushort tx_pkts_in_ring; - spinlock_t lock; - void __iomem *base; -}; - -/* - Details of the EtherLink16 Implementation - The 3c507 is a generic shared-memory i82586 implementation. - The host can map 16K, 32K, 48K, or 64K of the 64K memory into - 0x0[CD][08]0000, or all 64K into 0xF[02468]0000. - */ - -/* Offsets from the base I/O address. */ -#define SA_DATA 0 /* Station address data, or 3Com signature. */ -#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */ -#define RESET_IRQ 10 /* Reset the latched IRQ line. */ -#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */ -#define ROM_CONFIG 13 -#define MEM_CONFIG 14 -#define IRQ_CONFIG 15 -#define EL16_IO_EXTENT 16 - -/* The ID port is used at boot-time to locate the ethercard. */ -#define ID_PORT 0x100 - -/* Offsets to registers in the mailbox (SCB). */ -#define iSCB_STATUS 0x8 -#define iSCB_CMD 0xA -#define iSCB_CBL 0xC /* Command BLock offset. */ -#define iSCB_RFA 0xE /* Rx Frame Area offset. 
*/ - -/* Since the 3c507 maps the shared memory window so that the last byte is - at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or - 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively. - We can account for this be setting the 'SBC Base' entry in the ISCP table - below for all the 16 bit offset addresses, and also adding the 'SCB Base' - value to all 24 bit physical addresses (in the SCP table and the TX and RX - Buffer Descriptors). - -Mark - */ -#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start)) - -/* - What follows in 'init_words[]' is the "program" that is downloaded to the - 82586 memory. It's mostly tables and command blocks, and starts at the - reset address 0xfffff6. This is designed to be similar to the EtherExpress, - thus the unusual location of the SCB at 0x0008. - - Even with the additional "don't care" values, doing it this way takes less - program space than initializing the individual tables, and I feel it's much - cleaner. - - The databook is particularly useless for the first two structures, I had - to use the Crynwr driver as an example. - - The memory setup is as follows: - */ - -#define CONFIG_CMD 0x0018 -#define SET_SA_CMD 0x0024 -#define SA_OFFSET 0x002A -#define IDLELOOP 0x30 -#define TDR_CMD 0x38 -#define TDR_TIME 0x3C -#define DUMP_CMD 0x40 -#define DIAG_CMD 0x48 -#define SET_MC_CMD 0x4E -#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */ - -#define TX_BUF_START 0x0100 -#define NUM_TX_BUFS 5 -#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */ - -#define RX_BUF_START 0x2000 -#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */ -#define RX_BUF_END (dev->mem_end - dev->mem_start) - -#define TX_TIMEOUT (HZ/20) - -/* - That's it: only 86 bytes to set up the beast, including every extra - command available. The 170 byte buffer at DUMP_DATA is shared between the - Dump command (called only by the diagnostic program) and the SetMulticastList - command. - - To complete the memory setup you only have to write the station address at - SA_OFFSET and create the Tx & Rx buffer lists. - - The Tx command chain and buffer list is setup as follows: - A Tx command table, with the data buffer pointing to... - A Tx data buffer descriptor. The packet is in a single buffer, rather than - chaining together several smaller buffers. - A NoOp command, which initially points to itself, - And the packet data. - - A transmit is done by filling in the Tx command table and data buffer, - re-writing the NoOp command, and finally changing the offset of the last - command to point to the current Tx command. When the Tx command is finished, - it jumps to the NoOp, when it loops until the next Tx command changes the - "link offset" in the NoOp. This way the 82586 never has to go through the - slow restart sequence. - - The Rx buffer list is set up in the obvious ring structure. We have enough - memory (and low enough interrupt latency) that we can avoid the complicated - Rx buffer linked lists by alway associating a full-size Rx data buffer with - each Rx data frame. - - I current use four transmit buffers starting at TX_BUF_START (0x0100), and - use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers. - - */ - -static unsigned short init_words[] = { - /* System Configuration Pointer (SCP). */ - 0x0000, /* Set bus size to 16 bits. */ - 0,0, /* pad words. */ - 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */ - - /* Intermediate System Configuration Pointer (ISCP). 
*/ - 0x0001, /* Status word that's cleared when init is done. */ - 0x0008,0,0, /* SCB offset, (skip, skip) */ - - /* System Control Block (SCB). */ - 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */ - CONFIG_CMD, /* Command list pointer, points to Configure. */ - RX_BUF_START, /* Rx block list. */ - 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */ - - /* 0x0018: Configure command. Change to put MAC data with packet. */ - 0, CmdConfigure, /* Status, command. */ - SET_SA_CMD, /* Next command is Set Station Addr. */ - 0x0804, /* "4" bytes of config data, 8 byte FIFO. */ - 0x2e40, /* Magic values, including MAC data location. */ - 0, /* Unused pad word. */ - - /* 0x0024: Setup station address command. */ - 0, CmdSASetup, - SET_MC_CMD, /* Next command. */ - 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */ - - /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */ - 0, CmdNOp, IDLELOOP, 0 /* pad */, - - /* 0x0038: A unused Time-Domain Reflectometer command. */ - 0, CmdTDR, IDLELOOP, 0, - - /* 0x0040: An unused Dump State command. */ - 0, CmdDump, IDLELOOP, DUMP_DATA, - - /* 0x0048: An unused Diagnose command. */ - 0, CmdDiagnose, IDLELOOP, - - /* 0x004E: An empty set-multicast-list command. */ - 0, CmdMulticastList, IDLELOOP, 0, -}; - -/* Index to functions, as function prototypes. */ - -static int el16_probe1(struct net_device *dev, int ioaddr); -static int el16_open(struct net_device *dev); -static netdev_tx_t el16_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t el16_interrupt(int irq, void *dev_id); -static void el16_rx(struct net_device *dev); -static int el16_close(struct net_device *dev); -static void el16_tx_timeout (struct net_device *dev); - -static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad); -static void init_82586_mem(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; -static void init_rx_bufs(struct net_device *); - -static int io = 0x300; -static int irq; -static int mem_start; - - -/* Check for a network adaptor of this type, and return '0' iff one exists. - If dev->base_addr == 0, probe all likely locations. - If dev->base_addr == 1, always return failure. - If dev->base_addr == 2, (detachable devices only) allocate space for the - device and return success. - */ - -struct net_device * __init el16_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0}; - const unsigned *port; - int err = -ENODEV; - - if (!dev) - return ERR_PTR(-ENODEV); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - mem_start = dev->mem_start & 15; - } - - if (io > 0x1ff) /* Check a single specified location. */ - err = el16_probe1(dev, io); - else if (io != 0) - err = -ENXIO; /* Don't probe at all. 
*/ - else { - for (port = ports; *port; port++) { - err = el16_probe1(dev, *port); - if (!err) - break; - } - } - - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - free_irq(dev->irq, dev); - iounmap(((struct net_local *)netdev_priv(dev))->base); - release_region(dev->base_addr, EL16_IO_EXTENT); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops netdev_ops = { - .ndo_open = el16_open, - .ndo_stop = el16_close, - .ndo_start_xmit = el16_send_packet, - .ndo_tx_timeout = el16_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init el16_probe1(struct net_device *dev, int ioaddr) -{ - static unsigned char init_ID_done; - int i, irq, irqval, retval; - struct net_local *lp; - - if (init_ID_done == 0) { - ushort lrs_state = 0xff; - /* Send the ID sequence to the ID_PORT to enable the board(s). */ - outb(0x00, ID_PORT); - for(i = 0; i < 255; i++) { - outb(lrs_state, ID_PORT); - lrs_state <<= 1; - if (lrs_state & 0x100) - lrs_state ^= 0xe7; - } - outb(0x00, ID_PORT); - init_ID_done = 1; - } - - if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME)) - return -ENODEV; - - if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') || - (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) { - retval = -ENODEV; - goto out; - } - - pr_info("%s: 3c507 at %#x,", dev->name, ioaddr); - - /* We should make a few more checks here, like the first three octets of - the S.A. for the manufacturer's code. */ - - irq = inb(ioaddr + IRQ_CONFIG) & 0x0f; - - irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev); - if (irqval) { - pr_cont("\n"); - pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval); - retval = -EAGAIN; - goto out; - } - - /* We've committed to using the board, and can start filling in *dev. */ - dev->base_addr = ioaddr; - - outb(0x01, ioaddr + MISC_CTRL); - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + i); - pr_cont(" %pM", dev->dev_addr); - - if (mem_start) - net_debug = mem_start & 7; - -#ifdef MEM_BASE - dev->mem_start = MEM_BASE; - dev->mem_end = dev->mem_start + 0x10000; -#else - { - int base; - int size; - char mem_config = inb(ioaddr + MEM_CONFIG); - if (mem_config & 0x20) { - size = 64*1024; - base = 0xf00000 + (mem_config & 0x08 ? 0x080000 - : ((mem_config & 3) << 17)); - } else { - size = ((mem_config & 3) + 1) << 14; - base = 0x0c0000 + ( (mem_config & 0x18) << 12); - } - dev->mem_start = base; - dev->mem_end = base + size; - } -#endif - - dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0; - dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f; - - pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq, - dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1); - - if (net_debug) - pr_debug("%s", version); - - lp = netdev_priv(dev); - spin_lock_init(&lp->lock); - lp->base = ioremap(dev->mem_start, RX_BUF_END); - if (!lp->base) { - pr_err("3c507: unable to remap memory\n"); - retval = -EAGAIN; - goto out1; - } - - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - dev->ethtool_ops = &netdev_ethtool_ops; - dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ - return 0; -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr, EL16_IO_EXTENT); - return retval; -} - -static int el16_open(struct net_device *dev) -{ - /* Initialize the 82586 memory and start it. 
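
The board-memory decode just above packs both the window size and its bus address into a single MEM_CONFIG register byte: bit 5 selects a full 64K window high in memory, otherwise bits 0-1 give the size in 16K units and bits 3-4 place the window above 0xc0000. A user-space sketch of the same decode (function name and sample register values are mine, purely for illustration):

#include <stdio.h>

static void decode_mem_config(unsigned char mem_config,
			      unsigned long *base, unsigned long *size)
{
	if (mem_config & 0x20) {
		*size = 64 * 1024;
		*base = 0xf00000 + (mem_config & 0x08 ? 0x080000
						      : ((mem_config & 3) << 17));
	} else {
		*size = ((mem_config & 3) + 1) << 14;
		*base = 0x0c0000 + ((mem_config & 0x18) << 12);
	}
}

int main(void)
{
	/* A few representative register values, chosen for illustration. */
	static const unsigned char samples[] = { 0x00, 0x03, 0x18, 0x20, 0x28 };
	unsigned i;

	for (i = 0; i < sizeof(samples); i++) {
		unsigned long base, size;

		decode_mem_config(samples[i], &base, &size);
		printf("mem_config 0x%02x -> base 0x%06lx, size %luK\n",
		       samples[i], base, size / 1024);
	}
	return 0;
}
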
*/ - init_82586_mem(dev); - - netif_start_queue(dev); - return 0; -} - - -static void el16_tx_timeout (struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - void __iomem *shmem = lp->base; - - if (net_debug > 1) - pr_debug("%s: transmit timed out, %s? ", dev->name, - readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" : - "network cable problem"); - /* Try to restart the adaptor. */ - if (lp->last_restart == dev->stats.tx_packets) { - if (net_debug > 1) - pr_cont("Resetting board.\n"); - /* Completely reset the adaptor. */ - init_82586_mem (dev); - lp->tx_pkts_in_ring = 0; - } else { - /* Issue the channel attention signal and hope it "gets better". */ - if (net_debug > 1) - pr_cont("Kicking board.\n"); - writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD); - outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ - lp->last_restart = dev->stats.tx_packets; - } - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue (dev); -} - - -static netdev_tx_t el16_send_packet (struct sk_buff *skb, - struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - unsigned char *buf = skb->data; - - netif_stop_queue (dev); - - spin_lock_irqsave (&lp->lock, flags); - - dev->stats.tx_bytes += length; - /* Disable the 82586's input to the interrupt line. */ - outb (0x80, ioaddr + MISC_CTRL); - - hardware_send_packet (dev, buf, skb->len, length - skb->len); - - /* Enable the 82586 interrupt input. */ - outb (0x84, ioaddr + MISC_CTRL); - - spin_unlock_irqrestore (&lp->lock, flags); - - dev_kfree_skb (skb); - - /* You might need to clean up and record Tx statistics here. */ - - return NETDEV_TX_OK; -} - -/* The typical workload of the driver: - Handle the network interface interrupts. */ -static irqreturn_t el16_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr, status, boguscount = 0; - ushort ack_cmd = 0; - void __iomem *shmem; - - if (dev == NULL) { - pr_err("net_interrupt(): irq %d for unknown device.\n", irq); - return IRQ_NONE; - } - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - shmem = lp->base; - - spin_lock(&lp->lock); - - status = readw(shmem+iSCB_STATUS); - - if (net_debug > 4) { - pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status); - } - - /* Disable the 82586's input to the interrupt line. */ - outb(0x80, ioaddr + MISC_CTRL); - - /* Reap the Tx packet buffers. */ - while (lp->tx_pkts_in_ring) { - unsigned short tx_status = readw(shmem+lp->tx_reap); - if (!(tx_status & 0x8000)) { - if (net_debug > 5) - pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap); - break; - } - /* Tx unsuccessful or some interesting status bit set. */ - if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) { - dev->stats.tx_errors++; - if (tx_status & 0x0600) dev->stats.tx_carrier_errors++; - if (tx_status & 0x0100) dev->stats.tx_fifo_errors++; - if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++; - if (tx_status & 0x0020) dev->stats.tx_aborted_errors++; - dev->stats.collisions += tx_status & 0xf; - } - dev->stats.tx_packets++; - if (net_debug > 5) - pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status); - lp->tx_reap += TX_BUF_SIZE; - if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE) - lp->tx_reap = TX_BUF_START; - - lp->tx_pkts_in_ring--; - /* There is always more space in the Tx ring buffer now. 
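
For reference, the reap loop above maps the 82586 Tx status word onto the standard counters: bit 15 means the command completed, bit 13 means it completed OK, and the driver treats 0x0600 as carrier errors, 0x0100 as FIFO errors, a clear 0x0040 as a heartbeat error, 0x0020 as an abort, and the low nibble as the collision count. A stand-alone sketch of that mapping (struct and sample values are mine, not driver code):

#include <stdio.h>

struct tx_stats {
	unsigned long tx_errors, tx_carrier_errors, tx_fifo_errors;
	unsigned long tx_heartbeat_errors, tx_aborted_errors, collisions;
};

static void count_tx_status(unsigned short tx_status, struct tx_stats *s)
{
	if (!(tx_status & 0x8000))		/* still pending, nothing to count */
		return;
	if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) {
		s->tx_errors++;
		if (tx_status & 0x0600) s->tx_carrier_errors++;
		if (tx_status & 0x0100) s->tx_fifo_errors++;
		if (!(tx_status & 0x0040)) s->tx_heartbeat_errors++;
		if (tx_status & 0x0020) s->tx_aborted_errors++;
		s->collisions += tx_status & 0xf;
	}
}

int main(void)
{
	struct tx_stats s = { 0 };

	count_tx_status(0xa042, &s);	/* complete, OK bit set, 2 collisions */
	count_tx_status(0x8020, &s);	/* complete, not OK, abort bit set */
	printf("errors=%lu aborted=%lu collisions=%lu\n",
	       s.tx_errors, s.tx_aborted_errors, s.collisions);
	return 0;
}
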
*/ - netif_wake_queue(dev); - - if (++boguscount > 10) - break; - } - - if (status & 0x4000) { /* Packet received. */ - if (net_debug > 5) - pr_debug("Received packet, rx_head %04x.\n", lp->rx_head); - el16_rx(dev); - } - - /* Acknowledge the interrupt sources. */ - ack_cmd = status & 0xf000; - - if ((status & 0x0700) != 0x0200 && netif_running(dev)) { - if (net_debug) - pr_debug("%s: Command unit stopped, status %04x, restarting.\n", - dev->name, status); - /* If this ever occurs we should really re-write the idle loop, reset - the Tx list, and do a complete restart of the command unit. - For now we rely on the Tx timeout if the resume doesn't work. */ - ack_cmd |= CUC_RESUME; - } - - if ((status & 0x0070) != 0x0040 && netif_running(dev)) { - /* The Rx unit is not ready, it must be hung. Restart the receiver by - initializing the rx buffers, and issuing an Rx start command. */ - if (net_debug) - pr_debug("%s: Rx unit stopped, status %04x, restarting.\n", - dev->name, status); - init_rx_bufs(dev); - writew(RX_BUF_START,shmem+iSCB_RFA); - ack_cmd |= RX_START; - } - - writew(ack_cmd,shmem+iSCB_CMD); - outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ - - /* Clear the latched interrupt. */ - outb(0, ioaddr + RESET_IRQ); - - /* Enable the 82586's interrupt input. */ - outb(0x84, ioaddr + MISC_CTRL); - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - -static int el16_close(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - void __iomem *shmem = lp->base; - - netif_stop_queue(dev); - - /* Flush the Tx and disable Rx. */ - writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD); - outb(0, ioaddr + SIGNAL_CA); - - /* Disable the 82586's input to the interrupt line. */ - outb(0x80, ioaddr + MISC_CTRL); - - /* We always physically use the IRQ line, so we don't do free_irq(). */ - - /* Update the statistics here. */ - - return 0; -} - -/* Initialize the Rx-block list. */ -static void init_rx_bufs(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - void __iomem *write_ptr; - unsigned short SCB_base = SCB_BASE; - - int cur_rxbuf = lp->rx_head = RX_BUF_START; - - /* Initialize each Rx frame + data buffer. */ - do { /* While there is room for one more. */ - - write_ptr = lp->base + cur_rxbuf; - - writew(0x0000,write_ptr); /* Status */ - writew(0x0000,write_ptr+=2); /* Command */ - writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */ - writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */ - writew(0x0000,write_ptr+=2); /* Pad for dest addr. */ - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); /* Pad for source addr. */ - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); /* Pad for protocol. */ - - writew(0x0000,write_ptr+=2); /* Buffer: Actual count */ - writew(-1,write_ptr+=2); /* Buffer: Next (none). */ - writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */ - writew(0x0000,write_ptr+=2); - /* Finally, the number of bytes in the buffer. */ - writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2); - - lp->rx_tail = cur_rxbuf; - cur_rxbuf += RX_BUF_SIZE; - } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE); - - /* Terminate the list by setting the EOL bit, and wrap the pointer to make - the list a ring. */ - write_ptr = lp->base + lp->rx_tail + 2; - writew(0xC000,write_ptr); /* Command, mark as last. 
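
The per-slot layout that init_rx_bufs() pokes into shared memory, word by word, can be summarised as a C struct purely for illustration (the field names are mine; the driver only ever writes 16-bit words at these offsets):

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

struct rx_slot {			/* one RX_BUF_SIZE-sized slot         */
	uint16_t status;		/* +0  frame status, polled by el16_rx */
	uint16_t command;		/* +2  0xC000 (end-of-list) on tail    */
	uint16_t link;			/* +4  offset of the next slot         */
	uint16_t rbd_offset;		/* +6  points at rbd_count (+22)       */
	uint16_t hdr_pad[7];		/* +8  dest/source/protocol pads       */
	uint16_t rbd_count;		/* +22 actual byte count and flags     */
	uint16_t rbd_next;		/* +24 -1: one buffer per frame        */
	uint16_t rbd_buf_lo;		/* +26 slot + 0x20 (+ SCB base)        */
	uint16_t rbd_buf_hi;		/* +28 always zero                     */
	uint16_t rbd_size;		/* +30 0x8000 | (RX_BUF_SIZE - 0x20)   */
	/* packet data follows at +0x20 */
};

int main(void)
{
	printf("rbd_count at +%zu, packet data at +0x20, header is %zu bytes\n",
	       offsetof(struct rx_slot, rbd_count), sizeof(struct rx_slot));
	return 0;
}

The tail slot's command word gets the end-of-list bit and its link is bent back to rx_head, which is what turns the list into the ring that el16_rx() walks.
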
*/ - writew(lp->rx_head,write_ptr+2); /* Link */ -} - -static void init_82586_mem(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - void __iomem *shmem = lp->base; - - /* Enable loopback to protect the wire while starting up, - and hold the 586 in reset during the memory initialization. */ - outb(0x20, ioaddr + MISC_CTRL); - - /* Fix the ISCP address and base. */ - init_words[3] = SCB_BASE; - init_words[7] = SCB_BASE; - - /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */ - memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10); - - /* Write the words at 0x0000. */ - memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); - - /* Fill in the station address. */ - memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN); - - /* The Tx-block list is written as needed. We just set up the values. */ - lp->tx_cmd_link = IDLELOOP + 4; - lp->tx_head = lp->tx_reap = TX_BUF_START; - - init_rx_bufs(dev); - - /* Start the 586 by releasing the reset line, but leave loopback. */ - outb(0xA0, ioaddr + MISC_CTRL); - - /* This was time consuming to track down: you need to give two channel - attention signals to reliably start up the i82586. */ - outb(0, ioaddr + SIGNAL_CA); - - { - int boguscnt = 50; - while (readw(shmem+iSCB_STATUS) == 0) - if (--boguscnt == 0) { - pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n", - dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD)); - break; - } - /* Issue channel-attn -- the 82586 won't start. */ - outb(0, ioaddr + SIGNAL_CA); - } - - /* Disable loopback and enable interrupts. */ - outb(0x84, ioaddr + MISC_CTRL); - if (net_debug > 4) - pr_debug("%s: Initialized 82586, status %04x.\n", dev->name, - readw(shmem+iSCB_STATUS)); -} - -static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad) -{ - struct net_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - ushort tx_block = lp->tx_head; - void __iomem *write_ptr = lp->base + tx_block; - static char padding[ETH_ZLEN]; - - /* Set the write pointer to the Tx block, and put out the header. */ - writew(0x0000,write_ptr); /* Tx status */ - writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */ - writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */ - writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */ - - /* Output the data buffer descriptor. */ - writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */ - writew(-1,write_ptr+=2); /* No next data buffer. */ - writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */ - writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */ - - /* Output the Loop-back NoOp command. */ - writew(0x0000,write_ptr+=2); /* Tx status */ - writew(CmdNOp,write_ptr+=2); /* Tx command */ - writew(tx_block+16,write_ptr+=2); /* Next is myself. */ - - /* Output the packet at the write pointer. */ - memcpy_toio(write_ptr+2, buf, length); - if (pad) - memcpy_toio(write_ptr+length+2, padding, pad); - - /* Set the old command link pointing to this send packet. */ - writew(tx_block,lp->base + lp->tx_cmd_link); - lp->tx_cmd_link = tx_block + 20; - - /* Set the next free tx region. 
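
The NoOp self-loop chaining described earlier is the interesting part of hardware_send_packet(): every Tx block ends in a NoOp that links to itself, and queueing the next packet only re-points the previous block's link word at the new block, so the command unit never has to be restarted. A toy model of just that bookkeeping (constants copied from the defines above; the shared-memory writes themselves are omitted):

#include <stdio.h>

#define IDLELOOP	0x30
#define TX_BUF_START	0x0100
#define TX_BUF_SIZE	(1518 + 14 + 20 + 16)
#define RX_BUF_START	0x2000

static unsigned short tx_cmd_link = IDLELOOP + 4;	/* link word to patch  */
static unsigned short tx_head = TX_BUF_START;		/* next free Tx block  */

static void queue_packet(void)
{
	unsigned short tx_block = tx_head;

	printf("patch link at 0x%04x -> block 0x%04x (its NoOp at 0x%04x)\n",
	       tx_cmd_link, tx_block, tx_block + 16);

	tx_cmd_link = tx_block + 20;		/* this block's NoOp link word */
	tx_head = tx_block + TX_BUF_SIZE;	/* advance, wrapping the ring  */
	if (tx_head > RX_BUF_START - TX_BUF_SIZE)
		tx_head = TX_BUF_START;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)		/* more sends than buffers: wraps */
		queue_packet();
	return 0;
}

Running this shows five distinct block offsets before the head wraps back to TX_BUF_START, which matches NUM_TX_BUFS and the "insufficient reaping" gate at the end of hardware_send_packet().
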
*/ - lp->tx_head = tx_block + TX_BUF_SIZE; - if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE) - lp->tx_head = TX_BUF_START; - - if (net_debug > 4) { - pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n", - dev->name, ioaddr, length, tx_block, lp->tx_head); - } - - /* Grimly block further packets if there has been insufficient reaping. */ - if (++lp->tx_pkts_in_ring < NUM_TX_BUFS) - netif_wake_queue(dev); -} - -static void el16_rx(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - void __iomem *shmem = lp->base; - ushort rx_head = lp->rx_head; - ushort rx_tail = lp->rx_tail; - ushort boguscount = 10; - short frame_status; - - while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */ - void __iomem *read_frame = lp->base + rx_head; - ushort rfd_cmd = readw(read_frame+2); - ushort next_rx_frame = readw(read_frame+4); - ushort data_buffer_addr = readw(read_frame+6); - void __iomem *data_frame = lp->base + data_buffer_addr; - ushort pkt_len = readw(data_frame); - - if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 || - (pkt_len & 0xC000) != 0xC000) { - pr_err("%s: Rx frame at %#x corrupted, " - "status %04x cmd %04x next %04x " - "data-buf @%04x %04x.\n", - dev->name, rx_head, frame_status, rfd_cmd, - next_rx_frame, data_buffer_addr, pkt_len); - } else if ((frame_status & 0x2000) == 0) { - /* Frame Rxed, but with error. */ - dev->stats.rx_errors++; - if (frame_status & 0x0800) dev->stats.rx_crc_errors++; - if (frame_status & 0x0400) dev->stats.rx_frame_errors++; - if (frame_status & 0x0200) dev->stats.rx_fifo_errors++; - if (frame_status & 0x0100) dev->stats.rx_over_errors++; - if (frame_status & 0x0080) dev->stats.rx_length_errors++; - } else { - /* Malloc up new buffer. */ - struct sk_buff *skb; - - pkt_len &= 0x3fff; - skb = netdev_alloc_skb(dev, pkt_len + 2); - if (skb == NULL) { - pr_err("%s: Memory squeeze, dropping packet.\n", - dev->name); - dev->stats.rx_dropped++; - break; - } - - skb_reserve(skb,2); - - /* 'skb->data' points to the start of sk_buff data area. */ - memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); - - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - - /* Clear the status word and set End-of-List on the rx frame. */ - writew(0,read_frame); - writew(0xC000,read_frame+2); - /* Clear the end-of-list on the prev. RFD. 
*/ - writew(0x0000,lp->base + rx_tail + 2); - - rx_tail = rx_head; - rx_head = next_rx_frame; - if (--boguscount == 0) - break; - } - - lp->rx_head = rx_head; - lp->rx_tail = rx_tail; -} - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - -#ifdef MODULE -static struct net_device *dev_3c507; -module_param(io, int, 0); -module_param(irq, int, 0); -MODULE_PARM_DESC(io, "EtherLink16 I/O base address"); -MODULE_PARM_DESC(irq, "(ignored)"); - -int __init init_module(void) -{ - if (io == 0) - pr_notice("3c507: You should not use auto-probing with insmod!\n"); - dev_3c507 = el16_probe(-1); - return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0; -} - -void __exit -cleanup_module(void) -{ - struct net_device *dev = dev_3c507; - unregister_netdev(dev); - free_irq(dev->irq, dev); - iounmap(((struct net_local *)netdev_priv(dev))->base); - release_region(dev->base_addr, EL16_IO_EXTENT); - free_netdev(dev); -} -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index 6aa927af382c..1c54e229e3cc 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -95,9 +95,6 @@ static char version[] __initdata = #if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE) #define ENABLE_BVME6000_NET #endif -#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE) -#define ENABLE_APRICOT -#endif #ifdef ENABLE_MVME16x_NET #include <asm/mvme16xhw.h> @@ -120,8 +117,15 @@ static char version[] __initdata = #define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) #define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) #define ISCP_BUSY 0x00010000 -#define MACH_IS_APRICOT 0 #else +#error 82596.c: unknown architecture +#endif + +/* + * These were the intel versions, left here for reference. There + * are currently no x86 users of this legacy i82596 chip. 
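
The WSWAP* macros visible in the context above exchange the two 16-bit halves of a 32-bit value before it is handed to the i82596, presumably matching how the chip is wired on the supported VME boards; the retired x86 versions now parked under "#if 0" were plain casts. A minimal user-space illustration of the swap itself:

#include <stdio.h>
#include <stdint.h>

static uint32_t wswap32(uint32_t x)
{
	/* Exchange the upper and lower 16-bit halves, as WSWAPrfd() et al. do. */
	return (x << 16) | (x >> 16);
}

int main(void)
{
	uint32_t addr = 0x00123456;	/* an example bus address */

	printf("cpu view 0x%08x -> chip view 0x%08x\n", addr, wswap32(addr));
	return 0;
}
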
+ */ +#if 0 #define WSWAPrfd(x) ((struct i596_rfd *)((long)x)) #define WSWAPrbd(x) ((struct i596_rbd *)((long)x)) #define WSWAPiscp(x) ((struct i596_iscp *)((long)x)) @@ -130,7 +134,6 @@ static char version[] __initdata = #define WSWAPtbd(x) ((struct i596_tbd *)((long)x)) #define WSWAPchar(x) ((char *)((long)x)) #define ISCP_BUSY 0x0001 -#define MACH_IS_APRICOT 1 #endif /* @@ -383,11 +386,6 @@ static inline void CA(struct net_device *dev) i = *(volatile u32 *) (dev->base_addr); } #endif -#ifdef ENABLE_APRICOT - if (MACH_IS_APRICOT) { - outw(0, (short) (dev->base_addr) + 4); - } -#endif } @@ -617,9 +615,6 @@ static void rebuild_rx_bufs(struct net_device *dev) static int init_i596_mem(struct net_device *dev) { struct i596_private *lp = dev->ml_priv; -#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT) - short ioaddr = dev->base_addr; -#endif unsigned long flags; MPU_PORT(dev, PORT_RESET, NULL); @@ -653,18 +648,6 @@ static int init_i596_mem(struct net_device *dev) MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp)); -#elif defined(ENABLE_APRICOT) - - { - u32 scp = virt_to_bus(&lp->scp); - - /* change the scp address */ - outw(0, ioaddr); - outw(0, ioaddr); - outb(4, ioaddr + 0xf); - outw(scp | 2, ioaddr); - outw(scp >> 16, ioaddr); - } #endif lp->last_cmd = jiffies; @@ -677,10 +660,6 @@ static int init_i596_mem(struct net_device *dev) if (MACH_IS_BVME6000) lp->scp.sysbus = 0x0000004c; #endif -#ifdef ENABLE_APRICOT - if (MACH_IS_APRICOT) - lp->scp.sysbus = 0x00440000; -#endif lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp)); lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb)); @@ -698,10 +677,6 @@ static int init_i596_mem(struct net_device *dev) DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); -#if defined(ENABLE_APRICOT) - (void) inb(ioaddr + 0x10); - outb(4, ioaddr + 0xf); -#endif CA(dev); if (wait_istat(dev,lp,1000,"initialization timed out")) @@ -1203,43 +1178,6 @@ struct net_device * __init i82596_probe(int unit) goto found; } #endif -#ifdef ENABLE_APRICOT - { - int checksum = 0; - int ioaddr = 0x300; - - /* this is easy the ethernet interface can only be at 0x300 */ - /* first check nothing is already registered here */ - - if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) { - printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr); - err = -EBUSY; - goto out; - } - - dev->base_addr = ioaddr; - - for (i = 0; i < 8; i++) { - eth_addr[i] = inb(ioaddr + 8 + i); - checksum += eth_addr[i]; - } - - /* checksum is a multiple of 0x100, got this wrong first time - some machines have 0x100, some 0x200. The DOS driver doesn't - even bother with the checksum. - Some other boards trip the checksum.. but then appear as - ether address 0. 
Trap these - AC */ - - if ((checksum % 0x100) || - (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) { - err = -ENODEV; - goto out1; - } - - dev->irq = 10; - goto found; - } -#endif err = -ENODEV; goto out; @@ -1296,9 +1234,6 @@ out2: #endif free_page ((u32)(dev->mem_start)); out1: -#ifdef ENABLE_APRICOT - release_region(dev->base_addr, I596_TOTAL_SIZE); -#endif out: free_netdev(dev); return ERR_PTR(err); @@ -1455,10 +1390,6 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) *ethirq = 3; } #endif -#ifdef ENABLE_APRICOT - (void) inb(ioaddr + 0x10); - outb(4, ioaddr + 0xf); -#endif CA(dev); DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); @@ -1589,11 +1520,6 @@ static void set_multicast_list(struct net_device *dev) #ifdef MODULE static struct net_device *dev_82596; -#ifdef ENABLE_APRICOT -module_param(irq, int, 0); -MODULE_PARM_DESC(irq, "Apricot IRQ number"); -#endif - static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "i82596 debug mask"); @@ -1620,10 +1546,6 @@ void __exit cleanup_module(void) IOMAP_FULL_CACHING); #endif free_page ((u32)(dev_82596->mem_start)); -#ifdef ENABLE_APRICOT - /* If we don't do this, we can't re-insmod it later. */ - release_region(dev_82596->base_addr, I596_TOTAL_SIZE); -#endif free_netdev(dev_82596); } diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig index 959faf7388e2..955d929cd00f 100644 --- a/drivers/net/ethernet/i825xx/Kconfig +++ b/drivers/net/ethernet/i825xx/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_I825XX bool "Intel (82586/82593/82596) devices" default y depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \ - ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ + ARCH_ACORN || SNI_RM || SUN3 || \ GSC || BVME6000 || MVME16x || EXPERIMENTAL) ---help--- If you have a network (Ethernet) card belonging to this class, say Y @@ -20,29 +20,6 @@ config NET_VENDOR_I825XX if NET_VENDOR_I825XX -config ELPLUS - tristate "3c505 \"EtherLink Plus\" support" - depends on ISA && ISA_DMA_API - ---help--- - Information about this network (Ethernet) card can be found in - <file:Documentation/networking/3c505.txt>. If you have a card of - this type, say Y and read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called 3c505. - -config EL16 - tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)" - depends on ISA && EXPERIMENTAL - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called 3c507. - config ARM_ETHER1 tristate "Acorn Ether1 support" depends on ARM && ARCH_ACORN @@ -50,17 +27,6 @@ config ARM_ETHER1 If you have an Acorn system with one of these (AKA25) network cards, you should say Y to this option if you wish to use it with Linux. -config APRICOT - tristate "Apricot Xen-II on board Ethernet" - depends on ISA - ---help--- - If you have a network (Ethernet) controller of this type, say Y and - read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called apricot. - config BVME6000_NET tristate "BVME6000 Ethernet support" depends on BVME6000 @@ -70,33 +36,6 @@ config BVME6000_NET in your kernel. To compile this driver as a module, choose M here. 
-config EEXPRESS - tristate "EtherExpress 16 support" - depends on ISA - ---help--- - If you have an EtherExpress16 network (Ethernet) card, say Y and - read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. Note that the Intel - EtherExpress16 card used to be regarded as a very poor choice - because the driver was very unreliable. We now have a new driver - that should do better. - - To compile this driver as a module, choose M here. The module - will be called eexpress. - -config EEXPRESS_PRO - tristate "EtherExpressPro support/EtherExpress 10 (i82595) support" - depends on ISA - ---help--- - If you have a network (Ethernet) card of this type, say Y. This - driver supports Intel i82595{FX,TX} based boards. Note however - that the EtherExpress PRO/100 Ethernet card has its own separate - driver. Please read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called eepro. - config LASI_82596 tristate "Lasi ethernet" depends on GSC @@ -104,14 +43,6 @@ config LASI_82596 Say Y here to support the builtin Intel 82596 ethernet controller found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet. -config LP486E - tristate "LP486E on board Ethernet" - depends on ISA - ---help--- - Say Y here to support the 82596-based on-board Ethernet controller - for the Panther motherboard, which is one of the two shipped in the - Intel Professional Workstation. - config MVME16x_NET tristate "MVME16x Ethernet support" depends on MVME16x @@ -121,17 +52,6 @@ config MVME16x_NET driver for this chip in your kernel. To compile this driver as a module, choose M here. -config NI52 - tristate "NI5210 support" - depends on ISA - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called ni52. - config SNI_82596 tristate "SNI RM ethernet" depends on SNI_RM @@ -148,14 +68,4 @@ config SUN3_82586 that this driver does not support 82586-based adapters on additional VME boards. -config ZNET - tristate "Zenith Z-Note support (EXPERIMENTAL)" - depends on EXPERIMENTAL && ISA_DMA_API && X86 - ---help--- - The Zenith Z-Note notebook computer has a built-in network - (Ethernet) card, and this is the Linux driver for it. Note that the - IBM Thinkpad 300 is compatible with the Z-Note and is also supported - by this driver. Read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. 
- endif # NET_VENDOR_I825XX diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile index 6adff85e8ecc..8c8dcd29c40d 100644 --- a/drivers/net/ethernet/i825xx/Makefile +++ b/drivers/net/ethernet/i825xx/Makefile @@ -3,15 +3,7 @@ # obj-$(CONFIG_ARM_ETHER1) += ether1.o -obj-$(CONFIG_EEXPRESS) += eexpress.o -obj-$(CONFIG_EEXPRESS_PRO) += eepro.o -obj-$(CONFIG_ELPLUS) += 3c505.o -obj-$(CONFIG_EL16) += 3c507.o -obj-$(CONFIG_LP486E) += lp486e.o -obj-$(CONFIG_NI52) += ni52.o obj-$(CONFIG_SUN3_82586) += sun3_82586.o -obj-$(CONFIG_ZNET) += znet.o -obj-$(CONFIG_APRICOT) += 82596.o obj-$(CONFIG_LASI_82596) += lasi_82596.o obj-$(CONFIG_SNI_82596) += sni_82596.o obj-$(CONFIG_MVME16x_NET) += 82596.o diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c deleted file mode 100644 index 7f49fd54c521..000000000000 --- a/drivers/net/ethernet/i825xx/eepro.c +++ /dev/null @@ -1,1822 +0,0 @@ -/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */ -/* - Written 1994, 1995,1996 by Bao C. Ha. - - Copyright (C) 1994, 1995,1996 by Bao C. Ha. - - This software may be used and distributed - according to the terms of the GNU General Public License, - incorporated herein by reference. - - The author may be reached at bao.ha@srs.gov - or 418 Hastings Place, Martinez, GA 30907. - - Things remaining to do: - Better record keeping of errors. - Eliminate transmit interrupt to reduce overhead. - Implement "concurrent processing". I won't be doing it! - - Bugs: - - If you have a problem of not detecting the 82595 during a - reboot (warm reset), disable the FLASH memory should fix it. - This is a compatibility hardware problem. - - Versions: - 0.13b basic ethtool support (aris, 09/13/2004) - 0.13a in memory shortage, drop packets also in board - (Michael Westermann <mw@microdata-pos.de>, 07/30/2002) - 0.13 irq sharing, rewrote probe function, fixed a nasty bug in - hardware_send_packet and a major cleanup (aris, 11/08/2001) - 0.12d fixing a problem with single card detected as eight eth devices - fixing a problem with sudden drop in card performance - (chris (asdn@go2.pl), 10/29/2001) - 0.12c fixing some problems with old cards (aris, 01/08/2001) - 0.12b misc fixes (aris, 06/26/2000) - 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x - (aris (aris@conectiva.com.br), 05/19/2000) - 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999) - 0.11d added __initdata, __init stuff; call spin_lock_init - in eepro_probe1. Replaced "eepro" by dev->name. Augmented - the code protected by spin_lock in interrupt routine - (PdP, 12/12/1998) - 0.11c minor cleanup (PdP, RMC, 09/12/1998) - 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module - under 2.1.xx. Debug messages are flagged as KERN_DEBUG to - avoid console flooding. Added locking at critical parts. Now - the dawn thing is SMP safe. - 0.11a Attempt to get 2.1.xx support up (RMC) - 0.11 Brian Candler added support for multiple cards. Tested as - a module, no idea if it works when compiled into kernel. - - 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails - because the irq is lost somewhere. Fixed that by moving - request_irq and free_irq to eepro_open and eepro_close respectively. - 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt. - I'll need to find a way to specify an ioport other than - the default one in the PnP case. PnP definitively sucks. - And, yes, this is not the only reason. - 0.10c PnP Wakeup Test for 595FX. 
uncomment #define PnPWakeup; - to use. - 0.10b Should work now with (some) Pro/10+. At least for - me (and my two cards) it does. _No_ guarantee for - function with non-Pro/10+ cards! (don't have any) - (RMC, 9/11/96) - - 0.10 Added support for the Etherexpress Pro/10+. The - IRQ map was changed significantly from the old - pro/10. The new interrupt map was provided by - Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu). - (BCH, 9/3/96) - - 0.09 Fixed a race condition in the transmit algorithm, - which causes crashes under heavy load with fast - pentium computers. The performance should also - improve a bit. The size of RX buffer, and hence - TX buffer, can also be changed via lilo or insmod. - (BCH, 7/31/96) - - 0.08 Implement 32-bit I/O for the 82595TX and 82595FX - based lan cards. Disable full-duplex mode if TPE - is not used. (BCH, 4/8/96) - - 0.07a Fix a stat report which counts every packet as a - heart-beat failure. (BCH, 6/3/95) - - 0.07 Modified to support all other 82595-based lan cards. - The IRQ vector of the EtherExpress Pro will be set - according to the value saved in the EEPROM. For other - cards, I will do autoirq_request() to grab the next - available interrupt vector. (BCH, 3/17/95) - - 0.06a,b Interim released. Minor changes in the comments and - print out format. (BCH, 3/9/95 and 3/14/95) - - 0.06 First stable release that I am comfortable with. (BCH, - 3/2/95) - - 0.05 Complete testing of multicast. (BCH, 2/23/95) - - 0.04 Adding multicast support. (BCH, 2/14/95) - - 0.03 First widely alpha release for public testing. - (BCH, 2/14/95) - -*/ - -static const char version[] = - "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n"; - -#include <linux/module.h> - -/* - Sources: - - This driver wouldn't have been written without the availability - of the Crynwr's Lan595 driver source code. It helps me to - familiarize with the 82595 chipset while waiting for the Intel - documentation. I also learned how to detect the 82595 using - the packet driver's technique. - - This driver is written by cutting and pasting the skeleton.c driver - provided by Donald Becker. I also borrowed the EEPROM routine from - Donald Becker's 82586 driver. - - Datasheet for the Intel 82595 (including the TX and FX version). It - provides just enough info that the casual reader might think that it - documents the i82595. - - The User Manual for the 82595. It provides a lot of the missing - information. - -*/ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/bitops.h> -#include <linux/ethtool.h> - -#include <asm/io.h> -#include <asm/dma.h> - -#define DRV_NAME "eepro" -#define DRV_VERSION "0.13c" - -#define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) ) -/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */ -#define SLOW_DOWN inb(0x80) -/* udelay(2) */ -#define compat_init_data __initdata -enum iftype { AUI=0, BNC=1, TPE=2 }; - -/* First, a few definitions that the brave might change. */ -/* A zero-terminated list of I/O addresses to be probed. 
*/ -static unsigned int eepro_portlist[] compat_init_data = - { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0}; -/* note: 0x300 is default, the 595FX supports ALL IO Ports - from 0x000 to 0x3F0, some of which are reserved in PCs */ - -/* To try the (not-really PnP Wakeup: */ -/* -#define PnPWakeup -*/ - -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 0 -#endif -static unsigned int net_debug = NET_DEBUG; - -/* The number of low I/O ports used by the ethercard. */ -#define EEPRO_IO_EXTENT 16 - -/* Different 82595 chips */ -#define LAN595 0 -#define LAN595TX 1 -#define LAN595FX 2 -#define LAN595FX_10ISA 3 - -/* Information that need to be kept for each board. */ -struct eepro_local { - unsigned rx_start; - unsigned tx_start; /* start of the transmit chain */ - int tx_last; /* pointer to last packet in the transmit chain */ - unsigned tx_end; /* end of the transmit chain (plus 1) */ - int eepro; /* 1 for the EtherExpress Pro/10, - 2 for the EtherExpress Pro/10+, - 3 for the EtherExpress 10 (blue cards), - 0 for other 82595-based lan cards. */ - int version; /* a flag to indicate if this is a TX or FX - version of the 82595 chip. */ - int stepping; - - spinlock_t lock; /* Serializing lock */ - - unsigned rcv_ram; /* pre-calculated space for rx */ - unsigned xmt_ram; /* pre-calculated space for tx */ - unsigned char xmt_bar; - unsigned char xmt_lower_limit_reg; - unsigned char xmt_upper_limit_reg; - short xmt_lower_limit; - short xmt_upper_limit; - short rcv_lower_limit; - short rcv_upper_limit; - unsigned char eeprom_reg; - unsigned short word[8]; -}; - -/* The station (ethernet) address prefix, used for IDing the board. */ -#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */ -#define SA_ADDR1 0xaa -#define SA_ADDR2 0x00 - -#define GetBit(x,y) ((x & (1<<y))>>y) - -/* EEPROM Word 0: */ -#define ee_PnP 0 /* Plug 'n Play enable bit */ -#define ee_Word1 1 /* Word 1? */ -#define ee_BusWidth 2 /* 8/16 bit */ -#define ee_FlashAddr 3 /* Flash Address */ -#define ee_FlashMask 0x7 /* Mask */ -#define ee_AutoIO 6 /* */ -#define ee_reserved0 7 /* =0! */ -#define ee_Flash 8 /* Flash there? */ -#define ee_AutoNeg 9 /* Auto Negotiation enabled? */ -#define ee_IO0 10 /* IO Address LSB */ -#define ee_IO0Mask 0x /*...*/ -#define ee_IO1 15 /* IO MSB */ - -/* EEPROM Word 1: */ -#define ee_IntSel 0 /* Interrupt */ -#define ee_IntMask 0x7 -#define ee_LI 3 /* Link Integrity 0= enabled */ -#define ee_PC 4 /* Polarity Correction 0= enabled */ -#define ee_TPE_AUI 5 /* PortSelection 1=TPE */ -#define ee_Jabber 6 /* Jabber prevention 0= enabled */ -#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */ -#define ee_SMOUT 8 /* SMout Pin Control 0= Input */ -#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */ -#define ee_reserved1 10 /* .. 12 =0! */ -#define ee_AltReady 13 /* Alternate Ready, 0=normal */ -#define ee_reserved2 14 /* =0! 
*/ -#define ee_Duplex 15 - -/* Word2,3,4: */ -#define ee_IA5 0 /*bit start for individual Addr Byte 5 */ -#define ee_IA4 8 /*bit start for individual Addr Byte 5 */ -#define ee_IA3 0 /*bit start for individual Addr Byte 5 */ -#define ee_IA2 8 /*bit start for individual Addr Byte 5 */ -#define ee_IA1 0 /*bit start for individual Addr Byte 5 */ -#define ee_IA0 8 /*bit start for individual Addr Byte 5 */ - -/* Word 5: */ -#define ee_BNC_TPE 0 /* 0=TPE */ -#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */ -#define ee_BootTypeMask 0x3 -#define ee_NumConn 3 /* Number of Connections 0= One or Two */ -#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */ -#define ee_PortTPE 5 -#define ee_PortBNC 6 -#define ee_PortAUI 7 -#define ee_PowerMgt 10 /* 0= disabled */ -#define ee_CP 13 /* Concurrent Processing */ -#define ee_CPMask 0x7 - -/* Word 6: */ -#define ee_Stepping 0 /* Stepping info */ -#define ee_StepMask 0x0F -#define ee_BoardID 4 /* Manucaturer Board ID, reserved */ -#define ee_BoardMask 0x0FFF - -/* Word 7: */ -#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */ -#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */ - -/*..*/ -#define ee_SIZE 0x40 /* total EEprom Size */ -#define ee_Checksum 0xBABA /* initial and final value for adding checksum */ - - -/* Card identification via EEprom: */ -#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */ -#define ee_addr_id 0x11 /* Word offset for Card ID */ -#define ee_addr_SN 0x12 /* Serial Number */ -#define ee_addr_CRC_8 0x14 /* CRC over last thee Bytes */ - - -#define ee_vendor_intel0 0x25 /* Vendor ID Intel */ -#define ee_vendor_intel1 0xD4 -#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ -#define ee_id_eepro10p1 0x31 - -#define TX_TIMEOUT ((4*HZ)/10) - -/* Index to functions, as function prototypes. */ - -static int eepro_probe1(struct net_device *dev, int autoprobe); -static int eepro_open(struct net_device *dev); -static netdev_tx_t eepro_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t eepro_interrupt(int irq, void *dev_id); -static void eepro_rx(struct net_device *dev); -static void eepro_transmit_interrupt(struct net_device *dev); -static int eepro_close(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void eepro_tx_timeout (struct net_device *dev); - -static int read_eeprom(int ioaddr, int location, struct net_device *dev); -static int hardware_send_packet(struct net_device *dev, void *buf, short length); -static int eepro_grab_irq(struct net_device *dev); - -/* - Details of the i82595. - -You will need either the datasheet or the user manual to understand what -is going on here. The 82595 is very different from the 82586, 82593. - -The receive algorithm in eepro_rx() is just an implementation of the -RCV ring structure that the Intel 82595 imposes at the hardware level. -The receive buffer is set at 24K, and the transmit buffer is 8K. I -am assuming that the total buffer memory is 32K, which is true for the -Intel EtherExpress Pro/10. If it is less than that on a generic card, -the driver will be broken. - -The transmit algorithm in the hardware_send_packet() is similar to the -one in the eepro_rx(). The transmit buffer is a ring linked list. -I just queue the next available packet to the end of the list. In my -system, the 82595 is so fast that the list seems to always contain a -single packet. 
In other systems with faster computers and more congested -network traffics, the ring linked list should improve performance by -allowing up to 8K worth of packets to be queued. - -The sizes of the receive and transmit buffers can now be changed via lilo -or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0" -where rx-buffer is in KB unit. Modules uses the parameter mem which is -also in KB unit, for example "insmod io=io-address irq=0 mem=rx-buffer." -The receive buffer has to be more than 3K or less than 29K. Otherwise, -it is reset to the default of 24K, and, hence, 8K for the trasnmit -buffer (transmit-buffer = 32K - receive-buffer). - -*/ -#define RAM_SIZE 0x8000 - -#define RCV_HEADER 8 -#define RCV_DEFAULT_RAM 0x6000 - -#define XMT_HEADER 8 -#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM) - -#define XMT_START_PRO RCV_DEFAULT_RAM -#define XMT_START_10 0x0000 -#define RCV_START_PRO 0x0000 -#define RCV_START_10 XMT_DEFAULT_RAM - -#define RCV_DONE 0x0008 -#define RX_OK 0x2000 -#define RX_ERROR 0x0d81 - -#define TX_DONE_BIT 0x0080 -#define TX_OK 0x2000 -#define CHAIN_BIT 0x8000 -#define XMT_STATUS 0x02 -#define XMT_CHAIN 0x04 -#define XMT_COUNT 0x06 - -#define BANK0_SELECT 0x00 -#define BANK1_SELECT 0x40 -#define BANK2_SELECT 0x80 - -/* Bank 0 registers */ -#define COMMAND_REG 0x00 /* Register 0 */ -#define MC_SETUP 0x03 -#define XMT_CMD 0x04 -#define DIAGNOSE_CMD 0x07 -#define RCV_ENABLE_CMD 0x08 -#define RCV_DISABLE_CMD 0x0a -#define STOP_RCV_CMD 0x0b -#define RESET_CMD 0x0e -#define POWER_DOWN_CMD 0x18 -#define RESUME_XMT_CMD 0x1c -#define SEL_RESET_CMD 0x1e -#define STATUS_REG 0x01 /* Register 1 */ -#define RX_INT 0x02 -#define TX_INT 0x04 -#define EXEC_STATUS 0x30 -#define ID_REG 0x02 /* Register 2 */ -#define R_ROBIN_BITS 0xc0 /* round robin counter */ -#define ID_REG_MASK 0x2c -#define ID_REG_SIG 0x24 -#define AUTO_ENABLE 0x10 -#define INT_MASK_REG 0x03 /* Register 3 */ -#define RX_STOP_MASK 0x01 -#define RX_MASK 0x02 -#define TX_MASK 0x04 -#define EXEC_MASK 0x08 -#define ALL_MASK 0x0f -#define IO_32_BIT 0x10 -#define RCV_BAR 0x04 /* The following are word (16-bit) registers */ -#define RCV_STOP 0x06 - -#define XMT_BAR_PRO 0x0a -#define XMT_BAR_10 0x0b - -#define HOST_ADDRESS_REG 0x0c -#define IO_PORT 0x0e -#define IO_PORT_32_BIT 0x0c - -/* Bank 1 registers */ -#define REG1 0x01 -#define WORD_WIDTH 0x02 -#define INT_ENABLE 0x80 -#define INT_NO_REG 0x02 -#define RCV_LOWER_LIMIT_REG 0x08 -#define RCV_UPPER_LIMIT_REG 0x09 - -#define XMT_LOWER_LIMIT_REG_PRO 0x0a -#define XMT_UPPER_LIMIT_REG_PRO 0x0b -#define XMT_LOWER_LIMIT_REG_10 0x0b -#define XMT_UPPER_LIMIT_REG_10 0x0a - -/* Bank 2 registers */ -#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */ -#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */ -#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */ -#define REG2 0x02 -#define PRMSC_Mode 0x01 -#define Multi_IA 0x20 -#define REG3 0x03 -#define TPE_BIT 0x04 -#define BNC_BIT 0x20 -#define REG13 0x0d -#define FDX 0x00 -#define A_N_ENABLE 0x02 - -#define I_ADD_REG0 0x04 -#define I_ADD_REG1 0x05 -#define I_ADD_REG2 0x06 -#define I_ADD_REG3 0x07 -#define I_ADD_REG4 0x08 -#define I_ADD_REG5 0x09 - -#define EEPROM_REG_PRO 0x0a -#define EEPROM_REG_10 0x0b - -#define EESK 0x01 -#define EECS 0x02 -#define EEDI 0x04 -#define EEDO 0x08 - -/* do a full reset */ -#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr) - -/* do a nice reset */ -#define 
eepro_sel_reset(ioaddr) { \ - outb(SEL_RESET_CMD, ioaddr); \ - SLOW_DOWN; \ - SLOW_DOWN; \ - } - -/* disable all interrupts */ -#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG) - -/* clear all interrupts */ -#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG) - -/* enable tx/rx */ -#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \ - ioaddr + INT_MASK_REG) - -/* enable exec event interrupt */ -#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG) - -/* enable rx */ -#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr) - -/* disable rx */ -#define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr) - -/* switch bank */ -#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr) -#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr) -#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr) - -/* enable interrupt line */ -#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\ - ioaddr + REG1) - -/* disable interrupt line */ -#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \ - ioaddr + REG1); - -/* set diagnose flag */ -#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr) - -/* ack for rx int */ -#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG) - -/* ack for tx int */ -#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG) - -/* a complete sel reset */ -#define eepro_complete_selreset(ioaddr) { \ - dev->stats.tx_errors++;\ - eepro_sel_reset(ioaddr);\ - lp->tx_end = \ - lp->xmt_lower_limit;\ - lp->tx_start = lp->tx_end;\ - lp->tx_last = 0;\ - dev->trans_start = jiffies;\ - netif_wake_queue(dev);\ - eepro_en_rx(ioaddr);\ - } - -/* Check for a network adaptor of this type, and return '0' if one exists. - If dev->base_addr == 0, probe all likely locations. - If dev->base_addr == 1, always return failure. - If dev->base_addr == 2, allocate space for the device and return success - (detachable devices only). - */ -static int __init do_eepro_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - -#ifdef PnPWakeup - /* XXXX for multiple cards should this only be run once? */ - - /* Wakeup: */ - #define WakeupPort 0x279 - #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\ - 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\ - 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\ - 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43} - - { - unsigned short int WS[32]=WakeupSeq; - - if (request_region(WakeupPort, 2, "eepro wakeup")) { - if (net_debug>5) - printk(KERN_DEBUG "Waking UP\n"); - - outb_p(0,WakeupPort); - outb_p(0,WakeupPort); - for (i=0; i<32; i++) { - outb_p(WS[i],WakeupPort); - if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); - } - - release_region(WakeupPort, 2); - } else - printk(KERN_WARNING "PnP wakeup region busy!\n"); - } -#endif - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return eepro_probe1(dev, 0); - - else if (base_addr != 0) /* Don't probe at all. 
*/ - return -ENXIO; - - for (i = 0; eepro_portlist[i]; i++) { - dev->base_addr = eepro_portlist[i]; - dev->irq = irq; - if (eepro_probe1(dev, 1) == 0) - return 0; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init eepro_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local)); - int err; - - if (!dev) - return ERR_PTR(-ENODEV); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_eepro_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static void __init printEEPROMInfo(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned short Word; - int i,j; - - j = ee_Checksum; - for (i = 0; i < 8; i++) - j += lp->word[i]; - for ( ; i < ee_SIZE; i++) - j += read_eeprom(ioaddr, i, dev); - - printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff); - - Word = lp->word[0]; - printk(KERN_DEBUG "Word0:\n"); - printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP)); - printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 ); - printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg)); - printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4); - - if (net_debug>4) { - Word = lp->word[1]; - printk(KERN_DEBUG "Word1:\n"); - printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask); - printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI)); - printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); - printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); - printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); - printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort)); - printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); - } - - Word = lp->word[5]; - printk(KERN_DEBUG "Word5:\n"); - printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE)); - printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn)); - printk(KERN_DEBUG " Has "); - if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); - if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); - if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); - printk(KERN_DEBUG "port(s)\n"); - - Word = lp->word[6]; - printk(KERN_DEBUG "Word6:\n"); - printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask); - printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID); - - Word = lp->word[7]; - printk(KERN_DEBUG "Word7:\n"); - printk(KERN_DEBUG " INT to IRQ:\n"); - - for (i=0, j=0; i<15; i++) - if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i); - - printk(KERN_DEBUG "\n"); -} - -/* function to recalculate the limits of buffer based on rcv_ram */ -static void eepro_recalc (struct net_device *dev) -{ - struct eepro_local * lp; - - lp = netdev_priv(dev); - lp->xmt_ram = RAM_SIZE - lp->rcv_ram; - - if (lp->eepro == LAN595FX_10ISA) { - lp->xmt_lower_limit = XMT_START_10; - lp->xmt_upper_limit = (lp->xmt_ram - 2); - lp->rcv_lower_limit = lp->xmt_ram; - lp->rcv_upper_limit = (RAM_SIZE - 2); - } - else { - lp->rcv_lower_limit = RCV_START_PRO; - lp->rcv_upper_limit = (lp->rcv_ram - 2); - lp->xmt_lower_limit = lp->rcv_ram; - lp->xmt_upper_limit = (RAM_SIZE - 2); - } -} - -/* prints boot-time info */ -static void __init eepro_print_info (struct net_device *dev) -{ - struct eepro_local * lp = netdev_priv(dev); - int i; - const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; - - i = inb(dev->base_addr + ID_REG); - printk(KERN_DEBUG " id: %#x ",i); - printk(" io: %#x ", (unsigned)dev->base_addr); - - switch (lp->eepro) { - case 
LAN595FX_10ISA: - printk("%s: Intel EtherExpress 10 ISA\n at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - case LAN595FX: - printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - case LAN595TX: - printk("%s: Intel EtherExpress Pro/10 ISA at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - case LAN595: - printk("%s: Intel 82595-based lan card at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - } - - printk(" %pM", dev->dev_addr); - - if (net_debug > 3) - printk(KERN_DEBUG ", %dK RCV buffer", - (int)(lp->rcv_ram)/1024); - - if (dev->irq > 2) - printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); - else - printk(", %s.\n", ifmap[dev->if_port]); - - if (net_debug > 3) { - i = lp->word[5]; - if (i & 0x2000) /* bit 13 of EEPROM word 5 */ - printk(KERN_DEBUG "%s: Concurrent Processing is " - "enabled but not used!\n", dev->name); - } - - /* Check the station address for the manufacturer's code */ - if (net_debug>3) - printEEPROMInfo(dev); -} - -static const struct ethtool_ops eepro_ethtool_ops; - -static const struct net_device_ops eepro_netdev_ops = { - .ndo_open = eepro_open, - .ndo_stop = eepro_close, - .ndo_start_xmit = eepro_send_packet, - .ndo_set_rx_mode = set_multicast_list, - .ndo_tx_timeout = eepro_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* This is the real probe routine. Linux has a history of friendly device - probes on the ISA bus. A good device probe avoids doing writes, and - verifies that the correct device exists and functions. */ - -static int __init eepro_probe1(struct net_device *dev, int autoprobe) -{ - unsigned short station_addr[3], id, counter; - int i; - struct eepro_local *lp; - int ioaddr = dev->base_addr; - int err; - - /* Grab the region so we can find another board if autoIRQ fails. */ - if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { - if (!autoprobe) - printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n", - ioaddr); - return -EBUSY; - } - - /* Now, we are going to check for the signature of the - ID_REG (register 2 of bank 0) */ - - id = inb(ioaddr + ID_REG); - - if ((id & ID_REG_MASK) != ID_REG_SIG) - goto exit; - - /* We seem to have the 82595 signature, let's - play with its counter (last 2 bits of - register 2 of bank 0) to be sure. */ - - counter = id & R_ROBIN_BITS; - - if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40)) - goto exit; - - lp = netdev_priv(dev); - memset(lp, 0, sizeof(struct eepro_local)); - lp->xmt_bar = XMT_BAR_PRO; - lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO; - lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO; - lp->eeprom_reg = EEPROM_REG_PRO; - spin_lock_init(&lp->lock); - - /* Now, get the ethernet hardware address from - the EEPROM */ - station_addr[0] = read_eeprom(ioaddr, 2, dev); - - /* FIXME - find another way to know that we've found - * an Etherexpress 10 - */ - if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) { - lp->eepro = LAN595FX_10ISA; - lp->eeprom_reg = EEPROM_REG_10; - lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10; - lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10; - lp->xmt_bar = XMT_BAR_10; - station_addr[0] = read_eeprom(ioaddr, 2, dev); - } - - /* get all words at once. 
will be used here and for ethtool */ - for (i = 0; i < 8; i++) { - lp->word[i] = read_eeprom(ioaddr, i, dev); - } - station_addr[1] = lp->word[3]; - station_addr[2] = lp->word[4]; - - if (!lp->eepro) { - if (lp->word[7] == ee_FX_INT2IRQ) - lp->eepro = 2; - else if (station_addr[2] == SA_ADDR1) - lp->eepro = 1; - } - - /* Fill in the 'dev' fields. */ - for (i=0; i < 6; i++) - dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i]; - - /* RX buffer must be more than 3K and less than 29K */ - if (dev->mem_end < 3072 || dev->mem_end > 29696) - lp->rcv_ram = RCV_DEFAULT_RAM; - - /* calculate {xmt,rcv}_{lower,upper}_limit */ - eepro_recalc(dev); - - if (GetBit(lp->word[5], ee_BNC_TPE)) - dev->if_port = BNC; - else - dev->if_port = TPE; - - if (dev->irq < 2 && lp->eepro != 0) { - /* Mask off INT number */ - int count = lp->word[1] & 7; - unsigned irqMask = lp->word[7]; - - while (count--) - irqMask &= irqMask - 1; - - count = ffs(irqMask); - - if (count) - dev->irq = count - 1; - - if (dev->irq < 2) { - printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); - goto exit; - } else if (dev->irq == 2) { - dev->irq = 9; - } - } - - dev->netdev_ops = &eepro_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - dev->ethtool_ops = &eepro_ethtool_ops; - - /* print boot time info */ - eepro_print_info(dev); - - /* reset 82595 */ - eepro_reset(ioaddr); - - err = register_netdev(dev); - if (err) - goto err; - return 0; -exit: - err = -ENODEV; -err: - release_region(dev->base_addr, EEPRO_IO_EXTENT); - return err; -} - -/* Open/initialize the board. This is called (in the current kernel) - sometime after booting when the 'ifconfig' program is run. - - This routine should set everything up anew at each open, even - registers that "should" only need to be set once at boot, so that - there is non-reboot way to recover if something goes wrong. - */ - -static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1}; -static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1}; -static int eepro_grab_irq(struct net_device *dev) -{ - static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 }; - const int *irqp = irqlist; - int temp_reg, ioaddr = dev->base_addr; - - eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ - - /* Enable the interrupt line. */ - eepro_en_intline(ioaddr); - - /* be CAREFUL, BANK 0 now */ - eepro_sw2bank0(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - /* Let EXEC event to interrupt */ - eepro_en_intexec(ioaddr); - - do { - eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ - - temp_reg = inb(ioaddr + INT_NO_REG); - outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG); - - eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ - - if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != EBUSY) { - unsigned long irq_mask; - /* Twinkle the interrupt, and check if it's seen */ - irq_mask = probe_irq_on(); - - eepro_diag(ioaddr); /* RESET the 82595 */ - mdelay(20); - - if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */ - break; - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - } - } while (*++irqp); - - eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ - - /* Disable the physical interrupt line. */ - eepro_dis_intline(ioaddr); - - eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ - - /* Mask all the interrupts. 
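
The INT-to-IRQ lookup in eepro_probe1() above works by treating EEPROM word 1 as an INT line number and word 7 as a bitmask whose n-th set bit (counting from bit 0) names the ISA IRQ wired to INT n: the loop strips the lowest set bits and ffs() then returns the bit position. A sketch of the same lookup (function name and the demo value are mine; 0x1EB8 is the ee_FX_INT2IRQ map defined earlier):

#include <stdio.h>

static int int_to_irq(unsigned map, int intno)
{
	int irq = 0;

	while (intno--)
		map &= map - 1;		/* drop the lowest set bit */
	if (!map)
		return -1;		/* INT line not present in the map */
	while (!(map & 1)) {		/* bit position of the lowest set bit */
		map >>= 1;
		irq++;
	}
	return irq;
}

int main(void)
{
	unsigned map = 0x1EB8;		/* fixed INT-to-IRQ map of the FX parts */
	int i;

	for (i = 0; i < 8; i++)
		printf("INT%d -> IRQ %d\n", i, int_to_irq(map, i));
	return 0;
}

For 0x1EB8 this yields IRQs 3, 4, 5, 7, 9, 10, 11 and 12, the same values listed in the irqlist[] table that eepro_grab_irq() probes.
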
*/ - eepro_dis_int(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - return dev->irq; -} - -static int eepro_open(struct net_device *dev) -{ - unsigned short temp_reg, old8, old9; - int irqMask; - int i, ioaddr = dev->base_addr; - struct eepro_local *lp = netdev_priv(dev); - - if (net_debug > 3) - printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name); - - irqMask = lp->word[7]; - - if (lp->eepro == LAN595FX_10ISA) { - if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n"); - } - else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */ - { - lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */ - if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n"); - } - - else if ((dev->dev_addr[0] == SA_ADDR0 && - dev->dev_addr[1] == SA_ADDR1 && - dev->dev_addr[2] == SA_ADDR2)) - { - lp->eepro = 1; - if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n"); - } /* Yes, an Intel EtherExpress Pro/10 */ - - else lp->eepro = 0; /* No, it is a generic 82585 lan card */ - - /* Get the interrupt vector for the 82595 */ - if (dev->irq < 2 && eepro_grab_irq(dev) == 0) { - printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); - return -EAGAIN; - } - - if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) { - printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); - return -EAGAIN; - } - - /* Initialize the 82595. */ - - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - temp_reg = inb(ioaddr + lp->eeprom_reg); - - lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */ - - if (net_debug > 3) - printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping); - - if (temp_reg & 0x10) /* Check the TurnOff Enable bit */ - outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg); - for (i=0; i < 6; i++) - outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i); - - temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */ - outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */ - | RCV_Discard_BadFrame, ioaddr + REG1); - - temp_reg = inb(ioaddr + REG2); /* Match broadcast */ - outb(temp_reg | 0x14, ioaddr + REG2); - - temp_reg = inb(ioaddr + REG3); - outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */ - - /* Set the receiving mode */ - eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ - - /* Set the interrupt vector */ - temp_reg = inb(ioaddr + INT_NO_REG); - if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) - outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG); - else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); - - - temp_reg = inb(ioaddr + INT_NO_REG); - if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) - outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG); - else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); - - if (net_debug > 3) - printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg); - - - /* Initialize the RCV and XMT upper and lower limits */ - outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); - outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); - outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); - outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); - - /* Enable the interrupt line. 
*/ - eepro_en_intline(ioaddr); - - /* Switch back to Bank 0 */ - eepro_sw2bank0(ioaddr); - - /* Let RX and TX events to interrupt */ - eepro_en_int(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - /* Initialize RCV */ - outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); - lp->rx_start = lp->rcv_lower_limit; - outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); - - /* Initialize XMT */ - outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); - lp->tx_start = lp->tx_end = lp->xmt_lower_limit; - lp->tx_last = 0; - - /* Check for the i82595TX and i82595FX */ - old8 = inb(ioaddr + 8); - outb(~old8, ioaddr + 8); - - if ((temp_reg = inb(ioaddr + 8)) == old8) { - if (net_debug > 3) - printk(KERN_DEBUG "i82595 detected!\n"); - lp->version = LAN595; - } - else { - lp->version = LAN595TX; - outb(old8, ioaddr + 8); - old9 = inb(ioaddr + 9); - - if (irqMask==ee_FX_INT2IRQ) { - if (net_debug > 3) { - printk(KERN_DEBUG "IrqMask: %#x\n",irqMask); - printk(KERN_DEBUG "i82595FX detected!\n"); - } - lp->version = LAN595FX; - outb(old9, ioaddr + 9); - if (dev->if_port != TPE) { /* Hopefully, this will fix the - problem of using Pentiums and - pro/10 w/ BNC. */ - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - temp_reg = inb(ioaddr + REG13); - /* disable the full duplex mode since it is not - applicable with the 10Base2 cable. */ - outb(temp_reg & ~(FDX | A_N_ENABLE), REG13); - eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */ - } - } - else if (net_debug > 3) { - printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff)); - printk(KERN_DEBUG "i82595TX detected!\n"); - } - } - - eepro_sel_reset(ioaddr); - - netif_start_queue(dev); - - if (net_debug > 3) - printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name); - - /* enabling rx */ - eepro_en_rx(ioaddr); - - return 0; -} - -static void eepro_tx_timeout (struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* if (net_debug > 1) */ - printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, - "network cable problem"); - /* This is not a duplicate. One message for the console, - one for the log file */ - printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, - "network cable problem"); - eepro_complete_selreset(ioaddr); -} - - -static netdev_tx_t eepro_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - unsigned long flags; - int ioaddr = dev->base_addr; - short length = skb->len; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name); - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - netif_stop_queue (dev); - - eepro_dis_int(ioaddr); - spin_lock_irqsave(&lp->lock, flags); - - { - unsigned char *buf = skb->data; - - if (hardware_send_packet(dev, buf, length)) - /* we won't wake queue here because we're out of space */ - dev->stats.tx_dropped++; - else { - dev->stats.tx_bytes+=skb->len; - netif_wake_queue(dev); - } - - } - - dev_kfree_skb (skb); - - /* You might need to clean up and record Tx statistics here. */ - /* dev->stats.tx_aborted_errors++; */ - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); - - eepro_en_int(ioaddr); - spin_unlock_irqrestore(&lp->lock, flags); - - return NETDEV_TX_OK; -} - - -/* The typical workload of the driver: - Handle the network interface interrupts. 
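/*
 * A userspace sketch of the chip-version probe used in eepro_open() above:
 * write the complement of I/O register 8 and read it back; if the value
 * did not change the chip is a plain 82595, otherwise it is one of the
 * TX/FX parts and the original contents are restored. Port access is
 * abstracted behind callbacks so the fragment stays self-contained.
 */
typedef unsigned char (*reg_read_fn)(void *ctx, int reg);
typedef void (*reg_write_fn)(void *ctx, int reg, unsigned char val);

enum chip_version { CHIP_82595, CHIP_82595TX_OR_FX };

static enum chip_version detect_version(void *ctx, reg_read_fn rd, reg_write_fn wr)
{
        unsigned char old8 = rd(ctx, 8);

        wr(ctx, 8, (unsigned char)~old8);       /* try to flip every bit */

        if (rd(ctx, 8) == old8)
                return CHIP_82595;              /* the register would not budge */

        wr(ctx, 8, old8);                       /* put the original value back */
        return CHIP_82595TX_OR_FX;
}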
*/ - -static irqreturn_t -eepro_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct eepro_local *lp; - int ioaddr, status, boguscount = 20; - int handled = 0; - - lp = netdev_priv(dev); - - spin_lock(&lp->lock); - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name); - - ioaddr = dev->base_addr; - - while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--)) - { - handled = 1; - if (status & RX_INT) { - if (net_debug > 4) - printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name); - - eepro_dis_int(ioaddr); - - /* Get the received packets */ - eepro_ack_rx(ioaddr); - eepro_rx(dev); - - eepro_en_int(ioaddr); - } - if (status & TX_INT) { - if (net_debug > 4) - printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name); - - - eepro_dis_int(ioaddr); - - /* Process the status of transmitted packets */ - eepro_ack_tx(ioaddr); - eepro_transmit_interrupt(dev); - - eepro_en_int(ioaddr); - } - } - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name); - - spin_unlock(&lp->lock); - return IRQ_RETVAL(handled); -} - -static int eepro_close(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - short temp_reg; - - netif_stop_queue(dev); - - eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ - - /* Disable the physical interrupt line. */ - temp_reg = inb(ioaddr + REG1); - outb(temp_reg & 0x7f, ioaddr + REG1); - - eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ - - /* Flush the Tx and disable Rx. */ - outb(STOP_RCV_CMD, ioaddr); - lp->tx_start = lp->tx_end = lp->xmt_lower_limit; - lp->tx_last = 0; - - /* Mask all the interrupts. */ - eepro_dis_int(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - /* Reset the 82595 */ - eepro_reset(ioaddr); - - /* release the interrupt */ - free_irq(dev->irq, dev); - - /* Update the statistics here. What statistics? */ - - return 0; -} - -/* Set or clear the multicast filter for this adaptor. - */ -static void -set_multicast_list(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - unsigned short mode; - struct netdev_hw_addr *ha; - int mc_count = netdev_mc_count(dev); - - if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63) - { - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - mode = inb(ioaddr + REG2); - outb(mode | PRMSC_Mode, ioaddr + REG2); - mode = inb(ioaddr + REG3); - outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ - eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ - } - - else if (mc_count == 0) - { - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - mode = inb(ioaddr + REG2); - outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */ - mode = inb(ioaddr + REG3); - outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ - eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ - } - - else - { - unsigned short status, *eaddrs; - int i, boguscount = 0; - - /* Disable RX and TX interrupts. Necessary to avoid - corruption of the HOST_ADDRESS_REG by interrupt - service routines. */ - eepro_dis_int(ioaddr); - - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - mode = inb(ioaddr + REG2); - outb(mode | Multi_IA, ioaddr + REG2); - mode = inb(ioaddr + REG3); - outb(mode, ioaddr + REG3); /* writing reg. 
3 to complete the update */ - eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ - outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG); - outw(MC_SETUP, ioaddr + IO_PORT); - outw(0, ioaddr + IO_PORT); - outw(0, ioaddr + IO_PORT); - outw(6 * (mc_count + 1), ioaddr + IO_PORT); - - netdev_for_each_mc_addr(ha, dev) { - eaddrs = (unsigned short *) ha->addr; - outw(*eaddrs++, ioaddr + IO_PORT); - outw(*eaddrs++, ioaddr + IO_PORT); - outw(*eaddrs++, ioaddr + IO_PORT); - } - - eaddrs = (unsigned short *) dev->dev_addr; - outw(eaddrs[0], ioaddr + IO_PORT); - outw(eaddrs[1], ioaddr + IO_PORT); - outw(eaddrs[2], ioaddr + IO_PORT); - outw(lp->tx_end, ioaddr + lp->xmt_bar); - outb(MC_SETUP, ioaddr); - - /* Update the transmit queue */ - i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1); - - if (lp->tx_start != lp->tx_end) - { - /* update the next address and the chain bit in the - last packet */ - outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); - outw(i, ioaddr + IO_PORT); - outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); - status = inw(ioaddr + IO_PORT); - outw(status | CHAIN_BIT, ioaddr + IO_PORT); - lp->tx_end = i ; - } - else { - lp->tx_start = lp->tx_end = i ; - } - - /* Acknowledge that the MC setup is done */ - do { /* We should be doing this in the eepro_interrupt()! */ - SLOW_DOWN; - SLOW_DOWN; - if (inb(ioaddr + STATUS_REG) & 0x08) - { - i = inb(ioaddr); - outb(0x08, ioaddr + STATUS_REG); - - if (i & 0x20) { /* command ABORTed */ - printk(KERN_NOTICE "%s: multicast setup failed.\n", - dev->name); - break; - } else if ((i & 0x0f) == 0x03) { /* MC-Done */ - printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n", - dev->name, mc_count, - mc_count > 1 ? "es":""); - break; - } - } - } while (++boguscount < 100); - - /* Re-enable RX and TX interrupts */ - eepro_en_int(ioaddr); - } - if (lp->eepro == LAN595FX_10ISA) { - eepro_complete_selreset(ioaddr); - } - else - eepro_en_rx(ioaddr); -} - -/* The horrible routine to read a word from the serial EEPROM. */ -/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */ - -/* The delay between EEPROM clock transitions. */ -#define eeprom_delay() { udelay(40); } -#define EE_READ_CMD (6 << 6) - -static int -read_eeprom(int ioaddr, int location, struct net_device *dev) -{ - int i; - unsigned short retval = 0; - struct eepro_local *lp = netdev_priv(dev); - short ee_addr = ioaddr + lp->eeprom_reg; - int read_cmd = location | EE_READ_CMD; - short ctrl_val = EECS ; - - /* XXXX - black magic */ - eepro_sw2bank1(ioaddr); - outb(0x00, ioaddr + STATUS_REG); - /* XXXX - black magic */ - - eepro_sw2bank2(ioaddr); - outb(ctrl_val, ee_addr); - - /* Shift the read command bits out. */ - for (i = 8; i >= 0; i--) { - short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI - : ctrl_val; - outb(outval, ee_addr); - outb(outval | EESK, ee_addr); /* EEPROM clock tick. */ - eeprom_delay(); - outb(outval, ee_addr); /* Finish EEPROM a clock tick. */ - eeprom_delay(); - } - outb(ctrl_val, ee_addr); - - for (i = 16; i > 0; i--) { - outb(ctrl_val | EESK, ee_addr); eeprom_delay(); - retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0); - outb(ctrl_val, ee_addr); eeprom_delay(); - } - - /* Terminate the EEPROM access. 
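/*
 * The surrounding read_eeprom() bit-bangs a 93C46-style serial EEPROM: a
 * 9-bit read command (start bit, the "10" read opcode, a 6-bit address) is
 * shifted out on the data-in line with one clock pulse per bit, then 16
 * result bits are clocked back in MSB first. A condensed sketch of that
 * sequence with the pin wiggling behind callbacks; the bank switching and
 * the control-register bit layout of the real driver are left out.
 */
typedef void (*ee_pins_fn)(void *ctx, int cs, int clk, int di); /* drive CS/SK/DI */
typedef int  (*ee_dout_fn)(void *ctx);                          /* sample DO      */

static unsigned short ee_read_word(void *ctx, ee_pins_fn set, ee_dout_fn get,
                                   unsigned int location)
{
        unsigned int cmd = 0x180 | (location & 0x3f);   /* start + READ + address */
        unsigned short val = 0;
        int i;

        for (i = 8; i >= 0; i--) {              /* shift the 9 command bits out */
                int di = (cmd >> i) & 1;

                set(ctx, 1, 0, di);
                set(ctx, 1, 1, di);             /* rising clock edge latches DI */
                set(ctx, 1, 0, di);
        }

        for (i = 0; i < 16; i++) {              /* clock the 16 data bits back in */
                set(ctx, 1, 1, 0);
                val = (unsigned short)((val << 1) | (get(ctx) & 1));
                set(ctx, 1, 0, 0);
        }

        set(ctx, 0, 0, 0);                      /* drop chip select to finish */
        return val;
}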
*/ - ctrl_val &= ~EECS; - outb(ctrl_val | EESK, ee_addr); - eeprom_delay(); - outb(ctrl_val, ee_addr); - eeprom_delay(); - eepro_sw2bank0(ioaddr); - return retval; -} - -static int -hardware_send_packet(struct net_device *dev, void *buf, short length) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - unsigned status, tx_available, last, end; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name); - - /* determine how much of the transmit buffer space is available */ - if (lp->tx_end > lp->tx_start) - tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start); - else if (lp->tx_end < lp->tx_start) - tx_available = lp->tx_start - lp->tx_end; - else tx_available = lp->xmt_ram; - - if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) { - /* No space available ??? */ - return 1; - } - - last = lp->tx_end; - end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; - - if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ - if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { - /* Arrrr!!!, must keep the xmt header together, - several days were lost to chase this one down. */ - last = lp->xmt_lower_limit; - end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; - } - else end = lp->xmt_lower_limit + (end - - lp->xmt_upper_limit + 2); - } - - outw(last, ioaddr + HOST_ADDRESS_REG); - outw(XMT_CMD, ioaddr + IO_PORT); - outw(0, ioaddr + IO_PORT); - outw(end, ioaddr + IO_PORT); - outw(length, ioaddr + IO_PORT); - - if (lp->version == LAN595) - outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1); - else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ - unsigned short temp = inb(ioaddr + INT_MASK_REG); - outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); - outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2); - outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); - } - - /* A dummy read to flush the DRAM write pipeline */ - status = inw(ioaddr + IO_PORT); - - if (lp->tx_start == lp->tx_end) { - outw(last, ioaddr + lp->xmt_bar); - outb(XMT_CMD, ioaddr); - lp->tx_start = last; /* I don't like to change tx_start here */ - } - else { - /* update the next address and the chain bit in the - last packet */ - - if (lp->tx_end != last) { - outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); - outw(last, ioaddr + IO_PORT); - } - - outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); - status = inw(ioaddr + IO_PORT); - outw(status | CHAIN_BIT, ioaddr + IO_PORT); - - /* Continue the transmit command */ - outb(RESUME_XMT_CMD, ioaddr); - } - - lp->tx_last = last; - lp->tx_end = end; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name); - - return 0; -} - -static void -eepro_rx(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - short boguscount = 20; - short rcv_car = lp->rx_start; - unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name); - - /* Set the read pointer to the start of the RCV */ - outw(rcv_car, ioaddr + HOST_ADDRESS_REG); - - rcv_event = inw(ioaddr + IO_PORT); - - while (rcv_event == RCV_DONE) { - - rcv_status = inw(ioaddr + IO_PORT); - rcv_next_frame = inw(ioaddr + IO_PORT); - rcv_size = inw(ioaddr + IO_PORT); - - if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) { - - /* Malloc up new buffer. 
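/*
 * Sketch of the transmit-ring bookkeeping done in hardware_send_packet()
 * above: how much of the circular on-card buffer is free, and whether a
 * frame of a given length (the driver's own length rounding is kept
 * verbatim) still fits. SKETCH_XMT_HEADER is a placeholder for the real
 * XMT_HEADER size from the driver.
 */
#define SKETCH_XMT_HEADER 8     /* placeholder, not the driver's definition */

static unsigned int tx_ring_free(unsigned int start, unsigned int end, unsigned int ram)
{
        if (end > start)
                return ram - (end - start);     /* used span sits between start and end */
        if (end < start)
                return start - end;             /* used span wraps past the top         */
        return ram;                             /* start == end: the ring is empty      */
}

static int tx_frame_fits(unsigned int start, unsigned int end, unsigned int ram,
                         unsigned int len)
{
        unsigned int need = (((len + 3) >> 1) << 1) + 2 * SKETCH_XMT_HEADER;

        /* the driver refuses the frame unless strictly less than the free space is needed */
        return need < tx_ring_free(start, end, ram);
}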
*/ - struct sk_buff *skb; - - dev->stats.rx_bytes+=rcv_size; - rcv_size &= 0x3fff; - skb = netdev_alloc_skb(dev, rcv_size + 5); - if (skb == NULL) { - printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - rcv_car = lp->rx_start + RCV_HEADER + rcv_size; - lp->rx_start = rcv_next_frame; - outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); - - break; - } - skb_reserve(skb,2); - - if (lp->version == LAN595) - insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1); - else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ - unsigned short temp = inb(ioaddr + INT_MASK_REG); - outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); - insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), - (rcv_size + 3) >> 2); - outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); - } - - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - } - - else { /* Not sure will ever reach here, - I set the 595 to discard bad received frames */ - dev->stats.rx_errors++; - - if (rcv_status & 0x0100) - dev->stats.rx_over_errors++; - - else if (rcv_status & 0x0400) - dev->stats.rx_frame_errors++; - - else if (rcv_status & 0x0800) - dev->stats.rx_crc_errors++; - - printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", - dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); - } - - if (rcv_status & 0x1000) - dev->stats.rx_length_errors++; - - rcv_car = lp->rx_start + RCV_HEADER + rcv_size; - lp->rx_start = rcv_next_frame; - - if (--boguscount == 0) - break; - - outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); - rcv_event = inw(ioaddr + IO_PORT); - - } - if (rcv_car == 0) - rcv_car = lp->rcv_upper_limit | 0xff; - - outw(rcv_car - 1, ioaddr + RCV_STOP); - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name); -} - -static void -eepro_transmit_interrupt(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - short boguscount = 25; - short xmt_status; - - while ((lp->tx_start != lp->tx_end) && boguscount--) { - - outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); - xmt_status = inw(ioaddr+IO_PORT); - - if (!(xmt_status & TX_DONE_BIT)) - break; - - xmt_status = inw(ioaddr+IO_PORT); - lp->tx_start = inw(ioaddr+IO_PORT); - - netif_wake_queue (dev); - - if (xmt_status & TX_OK) - dev->stats.tx_packets++; - else { - dev->stats.tx_errors++; - if (xmt_status & 0x0400) { - dev->stats.tx_carrier_errors++; - printk(KERN_DEBUG "%s: carrier error\n", - dev->name); - printk(KERN_DEBUG "%s: XMT status = %#x\n", - dev->name, xmt_status); - } - else { - printk(KERN_DEBUG "%s: XMT status = %#x\n", - dev->name, xmt_status); - printk(KERN_DEBUG "%s: XMT status = %#x\n", - dev->name, xmt_status); - } - } - if (xmt_status & 0x000f) { - dev->stats.collisions += (xmt_status & 0x000f); - } - - if ((xmt_status & 0x0040) == 0x0) { - dev->stats.tx_heartbeat_errors++; - } - } -} - -static int eepro_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct eepro_local *lp = netdev_priv(dev); - - cmd->supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_Autoneg; - cmd->advertising = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_Autoneg; - - if (GetBit(lp->word[5], ee_PortTPE)) { - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; - } - if (GetBit(lp->word[5], ee_PortBNC)) { - cmd->supported |= SUPPORTED_BNC; - cmd->advertising |= ADVERTISED_BNC; - } - if (GetBit(lp->word[5], 
ee_PortAUI)) { - cmd->supported |= SUPPORTED_AUI; - cmd->advertising |= ADVERTISED_AUI; - } - - ethtool_cmd_speed_set(cmd, SPEED_10); - - if (dev->if_port == TPE && lp->word[1] & ee_Duplex) { - cmd->duplex = DUPLEX_FULL; - } - else { - cmd->duplex = DUPLEX_HALF; - } - - cmd->port = dev->if_port; - cmd->phy_address = dev->base_addr; - cmd->transceiver = XCVR_INTERNAL; - - if (lp->word[0] & ee_AutoNeg) { - cmd->autoneg = 1; - } - - return 0; -} - -static void eepro_ethtool_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), - "ISA 0x%lx", dev->base_addr); -} - -static const struct ethtool_ops eepro_ethtool_ops = { - .get_settings = eepro_ethtool_get_settings, - .get_drvinfo = eepro_ethtool_get_drvinfo, -}; - -#ifdef MODULE - -#define MAX_EEPRO 8 -static struct net_device *dev_eepro[MAX_EEPRO]; - -static int io[MAX_EEPRO] = { - [0 ... MAX_EEPRO-1] = -1 -}; -static int irq[MAX_EEPRO]; -static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */ - [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024 -}; -static int autodetect; - -static int n_eepro; -/* For linux 2.1.xx */ - -MODULE_AUTHOR("Pascal Dupuis and others"); -MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver"); -MODULE_LICENSE("GPL"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -module_param(autodetect, int, 0); -MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); -MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); -MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); - -int __init init_module(void) -{ - struct net_device *dev; - int i; - if (io[0] == -1 && autodetect == 0) { - printk(KERN_WARNING "eepro_init_module: Probe is very dangerous in ISA boards!\n"); - printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n"); - return -ENODEV; - } - else if (autodetect) { - /* if autodetect is set then we must force detection */ - for (i = 0; i < MAX_EEPRO; i++) { - io[i] = 0; - } - - printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); - } - - for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) { - dev = alloc_etherdev(sizeof(struct eepro_local)); - if (!dev) - break; - - dev->mem_end = mem[i]; - dev->base_addr = io[i]; - dev->irq = irq[i]; - - if (do_eepro_probe(dev) == 0) { - dev_eepro[n_eepro++] = dev; - continue; - } - free_netdev(dev); - break; - } - - if (n_eepro) - printk(KERN_INFO "%s", version); - - return n_eepro ? 0 : -ENODEV; -} - -void __exit -cleanup_module(void) -{ - int i; - - for (i=0; i<n_eepro; i++) { - struct net_device *dev = dev_eepro[i]; - unregister_netdev(dev); - release_region(dev->base_addr, EEPRO_IO_EXTENT); - free_netdev(dev); - } -} -#endif /* MODULE */ diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c deleted file mode 100644 index 7a6a2f04c5b1..000000000000 --- a/drivers/net/ethernet/i825xx/eexpress.c +++ /dev/null @@ -1,1661 +0,0 @@ -/* Intel EtherExpress 16 device driver for Linux - * - * Written by John Sullivan, 1995 - * based on original code by Donald Becker, with changes by - * Alan Cox and Pauline Middelink. 
- * - * Support for 8-bit mode by Zoltan Szilagyi <zoltans@cs.arizona.edu> - * - * Many modifications, and currently maintained, by - * Philip Blundell <philb@gnu.org> - * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk> - * Added MCA support Adam Fritzler (now deleted) - * - * Note - this driver is experimental still - it has problems on faster - * machines. Someone needs to sit down and go through it line by line with - * a databook... - */ - -/* The EtherExpress 16 is a fairly simple card, based on a shared-memory - * design using the i82586 Ethernet coprocessor. It bears no relationship, - * as far as I know, to the similarly-named "EtherExpress Pro" range. - * - * Historically, Linux support for these cards has been very bad. However, - * things seem to be getting better slowly. - */ - -/* If your card is confused about what sort of interface it has (eg it - * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART' - * or 'SOFTSET /LISA' from DOS seems to help. - */ - -/* Here's the scoop on memory mapping. - * - * There are three ways to access EtherExpress card memory: either using the - * shared-memory mapping, or using PIO through the dataport, or using PIO - * through the "shadow memory" ports. - * - * The shadow memory system works by having the card map some of its memory - * as follows: - * - * (the low five bits of the SMPTR are ignored) - * - * base+0x4000..400f memory at SMPTR+0..15 - * base+0x8000..800f memory at SMPTR+16..31 - * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently) - * base+0xc008..c00f memory at 0x0008..0x000f - * - * This last set (the one at c008) is particularly handy because the SCB - * lives at 0x0008. So that set of ports gives us easy random access to data - * in the SCB without having to mess around setting up pointers and the like. - * We always use this method to access the SCB (via the scb_xx() functions). - * - * Dataport access works by aiming the appropriate (read or write) pointer - * at the first address you're interested in, and then reading or writing from - * the dataport. The pointers auto-increment after each transfer. We use - * this for data transfer. - * - * We don't use the shared-memory system because it allegedly doesn't work on - * all cards, and because it's a bit more prone to go wrong (it's one more - * thing to configure...). - */ - -/* Known bugs: - * - * - The card seems to want to give us two interrupts every time something - * happens, where just one would be better. - */ - -/* - * - * Note by Zoltan Szilagyi 10-12-96: - * - * I've succeeded in eliminating the "CU wedged" messages, and hence the - * lockups, which were only occurring with cards running in 8-bit mode ("force - * 8-bit operation" in Intel's SoftSet utility). This version of the driver - * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the - * CU before submitting a packet for transmission, and then restarts it as soon - * as the process of handing the packet is complete. This is definitely an - * unnecessary slowdown if the card is running in 16-bit mode; therefore one - * should detect 16-bit vs 8-bit mode from the EEPROM settings and act - * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for - * ftp's, which is significantly better than I get in DOS, so the overhead of - * stopping and restarting the CU with each transmit is not prohibitive in - * practice. 
- * - * Update by David Woodhouse 11/5/99: - * - * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture. - * I assume that this is because 16-bit accesses are actually handled as two - * 8-bit accesses. - */ - -#ifdef __alpha__ -#define LOCKUP16 1 -#endif -#ifndef LOCKUP16 -#define LOCKUP16 0 -#endif - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/string.h> -#include <linux/in.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> - -#include <asm/io.h> -#include <asm/irq.h> - -#ifndef NET_DEBUG -#define NET_DEBUG 4 -#endif - -#include "eexpress.h" - -#define EEXP_IO_EXTENT 16 - -/* - * Private data declarations - */ - -struct net_local -{ - unsigned long last_tx; /* jiffies when last transmit started */ - unsigned long init_time; /* jiffies when eexp_hw_init586 called */ - unsigned short rx_first; /* first rx buf, same as RX_BUF_START */ - unsigned short rx_last; /* last rx buf */ - unsigned short rx_ptr; /* first rx buf to look at */ - unsigned short tx_head; /* next free tx buf */ - unsigned short tx_reap; /* first in-use tx buf */ - unsigned short tx_tail; /* previous tx buf to tx_head */ - unsigned short tx_link; /* last known-executing tx buf */ - unsigned short last_tx_restart; /* set to tx_link when we - restart the CU */ - unsigned char started; - unsigned short rx_buf_start; - unsigned short rx_buf_end; - unsigned short num_tx_bufs; - unsigned short num_rx_bufs; - unsigned char width; /* 0 for 16bit, 1 for 8bit */ - unsigned char was_promisc; - unsigned char old_mc_count; - spinlock_t lock; -}; - -/* This is the code and data that is downloaded to the EtherExpress card's - * memory at boot time. - */ - -static unsigned short start_code[] = { -/* 0x0000 */ - 0x0001, /* ISCP: busy - cleared after reset */ - 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */ - - 0x0000,0x0000, /* SCB: status, commands */ - 0x0000,0x0000, /* links to first command block, - first receive descriptor */ - 0x0000,0x0000, /* CRC error, alignment error counts */ - 0x0000,0x0000, /* out of resources, overrun error counts */ - - 0x0000,0x0000, /* pad */ - 0x0000,0x0000, - -/* 0x20 -- start of 82586 CU program */ -#define CONF_LINK 0x20 - 0x0000,Cmd_Config, - 0x0032, /* link to next command */ - 0x080c, /* 12 bytes follow : fifo threshold=8 */ - 0x2e40, /* don't rx bad frames - * SRDY/ARDY => ext. sync. 
: preamble len=8 - * take addresses from data buffers - * 6 bytes/address - */ - 0x6000, /* default backoff method & priority - * interframe spacing = 0x60 */ - 0xf200, /* slot time=0x200 - * max collision retry = 0xf */ -#define CONF_PROMISC 0x2e - 0x0000, /* no HDLC : normal CRC : enable broadcast - * disable promiscuous/multicast modes */ - 0x003c, /* minimum frame length = 60 octets) */ - - 0x0000,Cmd_SetAddr, - 0x003e, /* link to next command */ -#define CONF_HWADDR 0x38 - 0x0000,0x0000,0x0000, /* hardware address placed here */ - - 0x0000,Cmd_MCast, - 0x0076, /* link to next command */ -#define CONF_NR_MULTICAST 0x44 - 0x0000, /* number of bytes in multicast address(es) */ -#define CONF_MULTICAST 0x46 - 0x0000, 0x0000, 0x0000, /* some addresses */ - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - -#define CONF_DIAG_RESULT 0x76 - 0x0000, Cmd_Diag, - 0x007c, /* link to next command */ - - 0x0000,Cmd_TDR|Cmd_INT, - 0x0084, -#define CONF_TDR_RESULT 0x82 - 0x0000, - - 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */ - 0x0084 /* dummy link */ -}; - -/* maps irq number to EtherExpress magic value */ -static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 }; - -/* - * Prototypes for Linux interface - */ - -static int eexp_open(struct net_device *dev); -static int eexp_close(struct net_device *dev); -static void eexp_timeout(struct net_device *dev); -static netdev_tx_t eexp_xmit(struct sk_buff *buf, - struct net_device *dev); - -static irqreturn_t eexp_irq(int irq, void *dev_addr); -static void eexp_set_multicast(struct net_device *dev); - -/* - * Prototypes for hardware access functions - */ - -static void eexp_hw_rx_pio(struct net_device *dev); -static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, - unsigned short len); -static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr); -static unsigned short eexp_hw_readeeprom(unsigned short ioaddr, - unsigned char location); - -static unsigned short eexp_hw_lasttxstat(struct net_device *dev); -static void eexp_hw_txrestart(struct net_device *dev); - -static void eexp_hw_txinit (struct net_device *dev); -static void eexp_hw_rxinit (struct net_device *dev); - -static void eexp_hw_init586 (struct net_device *dev); -static void eexp_setup_filter (struct net_device *dev); - -static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"}; -enum eexp_iftype {AUI=0, BNC=1, TPE=2}; - -#define STARTED_RU 2 -#define STARTED_CU 1 - -/* - * Primitive hardware access functions. 
- */ - -static inline unsigned short scb_status(struct net_device *dev) -{ - return inw(dev->base_addr + 0xc008); -} - -static inline unsigned short scb_rdcmd(struct net_device *dev) -{ - return inw(dev->base_addr + 0xc00a); -} - -static inline void scb_command(struct net_device *dev, unsigned short cmd) -{ - outw(cmd, dev->base_addr + 0xc00a); -} - -static inline void scb_wrcbl(struct net_device *dev, unsigned short val) -{ - outw(val, dev->base_addr + 0xc00c); -} - -static inline void scb_wrrfa(struct net_device *dev, unsigned short val) -{ - outw(val, dev->base_addr + 0xc00e); -} - -static inline void set_loopback(struct net_device *dev) -{ - outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config); -} - -static inline void clear_loopback(struct net_device *dev) -{ - outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config); -} - -static inline unsigned short int SHADOW(short int addr) -{ - addr &= 0x1f; - if (addr > 0xf) addr += 0x3ff0; - return addr + 0x4000; -} - -/* - * Linux interface - */ - -/* - * checks for presence of EtherExpress card - */ - -static int __init do_express_probe(struct net_device *dev) -{ - unsigned short *port; - static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 }; - unsigned short ioaddr = dev->base_addr; - int dev_irq = dev->irq; - int err; - - dev->if_port = 0xff; /* not set */ - - if (ioaddr&0xfe00) { - if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) - return -EBUSY; - err = eexp_hw_probe(dev,ioaddr); - release_region(ioaddr, EEXP_IO_EXTENT); - return err; - } else if (ioaddr) - return -ENXIO; - - for (port=&ports[0] ; *port ; port++ ) - { - unsigned short sum = 0; - int i; - if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress")) - continue; - for ( i=0 ; i<4 ; i++ ) - { - unsigned short t; - t = inb(*port + ID_PORT); - sum |= (t>>4) << ((t & 0x03)<<2); - } - if (sum==0xbaba && !eexp_hw_probe(dev,*port)) { - release_region(*port, EEXP_IO_EXTENT); - return 0; - } - release_region(*port, EEXP_IO_EXTENT); - dev->irq = dev_irq; - } - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init express_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_express_probe(dev); - if (!err) - return dev; - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -/* - * open and initialize the adapter, ready for use - */ - -static int eexp_open(struct net_device *dev) -{ - int ret; - unsigned short ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: eexp_open()\n", dev->name); -#endif - - if (!dev->irq || !irqrmap[dev->irq]) - return -ENXIO; - - ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev); - if (ret) - return ret; - - if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , ioaddr); - goto err_out1; - } - if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , ioaddr+0x4000); - goto err_out2; - } - if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , ioaddr+0x8000); - goto err_out3; - } - if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , 
ioaddr+0xc000); - goto err_out4; - } - - if (lp->width) { - printk("%s: forcing ASIC to 8-bit mode\n", dev->name); - outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config); - } - - eexp_hw_init586(dev); - netif_start_queue(dev); -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name); -#endif - return 0; - - err_out4: - release_region(ioaddr+0x8000, EEXP_IO_EXTENT); - err_out3: - release_region(ioaddr+0x4000, EEXP_IO_EXTENT); - err_out2: - release_region(ioaddr, EEXP_IO_EXTENT); - err_out1: - free_irq(dev->irq, dev); - return -EBUSY; -} - -/* - * close and disable the interface, leaving the 586 in reset. - */ - -static int eexp_close(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - - int irq = dev->irq; - - netif_stop_queue(dev); - - outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ); - lp->started = 0; - scb_command(dev, SCB_CUsuspend|SCB_RUsuspend); - outb(0,ioaddr+SIGNAL_CA); - free_irq(irq,dev); - outb(i586_RST,ioaddr+EEPROM_Ctrl); - release_region(ioaddr, EEXP_IO_EXTENT); - release_region(ioaddr+0x4000, 16); - release_region(ioaddr+0x8000, 16); - release_region(ioaddr+0xc000, 16); - - return 0; -} - -/* - * This gets called when a higher level thinks we are broken. Check that - * nothing has become jammed in the CU. - */ - -static void unstick_cu(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - - if (lp->started) - { - if (time_after(jiffies, dev_trans_start(dev) + HZ/2)) - { - if (lp->tx_link==lp->last_tx_restart) - { - unsigned short boguscount=200,rsst; - printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n", - dev->name, scb_status(dev)); - eexp_hw_txinit(dev); - lp->last_tx_restart = 0; - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - while (!SCB_complete(rsst=scb_status(dev))) - { - if (!--boguscount) - { - boguscount=200; - printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n", - dev->name,rsst); - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - } - } - netif_wake_queue(dev); - } - else - { - unsigned short status = scb_status(dev); - if (SCB_CUdead(status)) - { - unsigned short txstatus = eexp_hw_lasttxstat(dev); - printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n", - dev->name, status, txstatus); - eexp_hw_txrestart(dev); - } - else - { - unsigned short txstatus = eexp_hw_lasttxstat(dev); - if (netif_queue_stopped(dev) && !txstatus) - { - printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n", - dev->name,status,txstatus); - eexp_hw_init586(dev); - netif_wake_queue(dev); - } - else - { - printk(KERN_WARNING "%s: transmit timed out\n", dev->name); - } - } - } - } - } - else - { - if (time_after(jiffies, lp->init_time + 10)) - { - unsigned short status = scb_status(dev); - printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n", - dev->name, status); - eexp_hw_init586(dev); - netif_wake_queue(dev); - } - } -} - -static void eexp_timeout(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); -#ifdef CONFIG_SMP - unsigned long flags; -#endif - int status; - - disable_irq(dev->irq); - - /* - * Best would be to use synchronize_irq(); spin_lock() here - * lets make it work first.. 
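/*
 * A recovery idiom that recurs in this driver (unstick_cu() above, and
 * eexp_hw_txrestart()/eexp_hw_init586() further down): poll the SCB status
 * for a bounded number of spins and, if the 82586 stays silent, re-issue
 * the start command, giving up after a few attempts instead of hanging.
 * A condensed sketch with the hardware behind callbacks:
 */
#include <stdbool.h>

struct cu_ops {
        void (*issue_start)(void *ctx);         /* write CBL pointer, CUstart, channel attention */
        unsigned int (*read_status)(void *ctx); /* read back the SCB status word                 */
};

static bool restart_cu(const struct cu_ops *ops, void *ctx)
{
        int retries = 5;

        while (retries--) {
                int spins = 200;

                ops->issue_start(ctx);
                while (spins--) {
                        if (ops->read_status(ctx))
                                return true;    /* the command unit responded */
                }
                /* still silent: fall through and issue the start again */
        }
        return false;                           /* caller should re-init the board */
}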
- */ - -#ifdef CONFIG_SMP - spin_lock_irqsave(&lp->lock, flags); -#endif - - status = scb_status(dev); - unstick_cu(dev); - printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name, - (SCB_complete(status)?"lost interrupt": - "board on fire")); - dev->stats.tx_errors++; - lp->last_tx = jiffies; - if (!SCB_complete(status)) { - scb_command(dev, SCB_CUabort); - outb(0,dev->base_addr+SIGNAL_CA); - } - netif_wake_queue(dev); -#ifdef CONFIG_SMP - spin_unlock_irqrestore(&lp->lock, flags); -#endif -} - -/* - * Called to transmit a packet, or to allow us to right ourselves - * if the kernel thinks we've died. - */ -static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev) -{ - short length = buf->len; -#ifdef CONFIG_SMP - struct net_local *lp = netdev_priv(dev); - unsigned long flags; -#endif - -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name); -#endif - - if (buf->len < ETH_ZLEN) { - if (skb_padto(buf, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - - disable_irq(dev->irq); - - /* - * Best would be to use synchronize_irq(); spin_lock() here - * lets make it work first.. - */ - -#ifdef CONFIG_SMP - spin_lock_irqsave(&lp->lock, flags); -#endif - - { - unsigned short *data = (unsigned short *)buf->data; - - dev->stats.tx_bytes += length; - - eexp_hw_tx_pio(dev,data,length); - } - dev_kfree_skb(buf); -#ifdef CONFIG_SMP - spin_unlock_irqrestore(&lp->lock, flags); -#endif - enable_irq(dev->irq); - return NETDEV_TX_OK; -} - -/* - * Handle an EtherExpress interrupt - * If we've finished initializing, start the RU and CU up. - * If we've already started, reap tx buffers, handle any received packets, - * check to make sure we've not become wedged. - */ - -static unsigned short eexp_start_irq(struct net_device *dev, - unsigned short status) -{ - unsigned short ack_cmd = SCB_ack(status); - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) { - short diag_status, tdr_status; - while (SCB_CUstat(status)==2) - status = scb_status(dev); -#if NET_DEBUG > 4 - printk("%s: CU went non-active (status %04x)\n", - dev->name, status); -#endif - - outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR); - diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT)); - if (diag_status & 1<<11) { - printk(KERN_WARNING "%s: 82586 failed self-test\n", - dev->name); - } else if (!(diag_status & 1<<13)) { - printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name); - } - - outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR); - tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT)); - if (tdr_status & (TDR_SHORT|TDR_OPEN)) { - printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n", dev->name, (tdr_status & TDR_SHORT)?"short":"broken", tdr_status & TDR_TIME, ((tdr_status & TDR_TIME) != 1) ? 
"s" : ""); - } - else if (tdr_status & TDR_XCVRPROBLEM) { - printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name); - } - else if (tdr_status & TDR_LINKOK) { -#if NET_DEBUG > 4 - printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name); -#endif - } else { - printk("%s: TDR is ga-ga (status %04x)\n", dev->name, - tdr_status); - } - - lp->started |= STARTED_CU; - scb_wrcbl(dev, lp->tx_link); - /* if the RU isn't running, start it now */ - if (!(lp->started & STARTED_RU)) { - ack_cmd |= SCB_RUstart; - scb_wrrfa(dev, lp->rx_buf_start); - lp->rx_ptr = lp->rx_buf_start; - lp->started |= STARTED_RU; - } - ack_cmd |= SCB_CUstart | 0x2000; - } - - if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4) - lp->started|=STARTED_RU; - - return ack_cmd; -} - -static void eexp_cmd_clear(struct net_device *dev) -{ - unsigned long int oldtime = jiffies; - while (scb_rdcmd(dev) && (time_before(jiffies, oldtime + 10))); - if (scb_rdcmd(dev)) { - printk("%s: command didn't clear\n", dev->name); - } -} - -static irqreturn_t eexp_irq(int dummy, void *dev_info) -{ - struct net_device *dev = dev_info; - struct net_local *lp; - unsigned short ioaddr,status,ack_cmd; - unsigned short old_read_ptr, old_write_ptr; - - lp = netdev_priv(dev); - ioaddr = dev->base_addr; - - spin_lock(&lp->lock); - - old_read_ptr = inw(ioaddr+READ_PTR); - old_write_ptr = inw(ioaddr+WRITE_PTR); - - outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ); - - status = scb_status(dev); - -#if NET_DEBUG > 4 - printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status); -#endif - - if (lp->started == (STARTED_CU | STARTED_RU)) { - - do { - eexp_cmd_clear(dev); - - ack_cmd = SCB_ack(status); - scb_command(dev, ack_cmd); - outb(0,ioaddr+SIGNAL_CA); - - eexp_cmd_clear(dev); - - if (SCB_complete(status)) { - if (!eexp_hw_lasttxstat(dev)) { - printk("%s: tx interrupt but no status\n", dev->name); - } - } - - if (SCB_rxdframe(status)) - eexp_hw_rx_pio(dev); - - status = scb_status(dev); - } while (status & 0xc000); - - if (SCB_RUdead(status)) - { - printk(KERN_WARNING "%s: RU stopped: status %04x\n", - dev->name,status); -#if 0 - printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd); - outw(lp->cur_rfd, ioaddr+READ_PTR); - printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT)); - outw(lp->cur_rfd+6, ioaddr+READ_PTR); - printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT)); - outw(rbd, ioaddr+READ_PTR); - printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT)); - outw(rbd+8, ioaddr+READ_PTR); - printk("[%04x]\n", inw(ioaddr+DATAPORT)); -#endif - dev->stats.rx_errors++; -#if 1 - eexp_hw_rxinit(dev); -#else - lp->cur_rfd = lp->first_rfd; -#endif - scb_wrrfa(dev, lp->rx_buf_start); - scb_command(dev, SCB_RUstart); - outb(0,ioaddr+SIGNAL_CA); - } - } else { - if (status & 0x8000) - ack_cmd = eexp_start_irq(dev, status); - else - ack_cmd = SCB_ack(status); - scb_command(dev, ack_cmd); - outb(0,ioaddr+SIGNAL_CA); - } - - eexp_cmd_clear(dev); - - outb(SIRQ_en|irqrmap[dev->irq], ioaddr+SET_IRQ); - -#if NET_DEBUG > 6 - printk("%s: leaving eexp_irq()\n", dev->name); -#endif - outw(old_read_ptr, ioaddr+READ_PTR); - outw(old_write_ptr, ioaddr+WRITE_PTR); - - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - -/* - * Hardware access functions - */ - -/* - * Set the cable type to use. 
- */ - -static void eexp_hw_set_interface(struct net_device *dev) -{ - unsigned char oldval = inb(dev->base_addr + 0x300e); - oldval &= ~0x82; - switch (dev->if_port) { - case TPE: - oldval |= 0x2; - case BNC: - oldval |= 0x80; - break; - } - outb(oldval, dev->base_addr+0x300e); - mdelay(20); -} - -/* - * Check all the receive buffers, and hand any received packets - * to the upper levels. Basic sanity check on each frame - * descriptor, though we don't bother trying to fix broken ones. - */ - -static void eexp_hw_rx_pio(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short rx_block = lp->rx_ptr; - unsigned short boguscount = lp->num_rx_bufs; - unsigned short ioaddr = dev->base_addr; - unsigned short status; - -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name); -#endif - - do { - unsigned short rfd_cmd, rx_next, pbuf, pkt_len; - - outw(rx_block, ioaddr + READ_PTR); - status = inw(ioaddr + DATAPORT); - - if (FD_Done(status)) - { - rfd_cmd = inw(ioaddr + DATAPORT); - rx_next = inw(ioaddr + DATAPORT); - pbuf = inw(ioaddr + DATAPORT); - - outw(pbuf, ioaddr + READ_PTR); - pkt_len = inw(ioaddr + DATAPORT); - - if (rfd_cmd!=0x0000) - { - printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n", - dev->name, rfd_cmd); - continue; - } - else if (pbuf!=rx_block+0x16) - { - printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n", - dev->name, rx_block+0x16, pbuf); - continue; - } - else if ((pkt_len & 0xc000)!=0xc000) - { - printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n", - dev->name, pkt_len & 0xc000); - continue; - } - else if (!FD_OK(status)) - { - dev->stats.rx_errors++; - if (FD_CRC(status)) - dev->stats.rx_crc_errors++; - if (FD_Align(status)) - dev->stats.rx_frame_errors++; - if (FD_Resrc(status)) - dev->stats.rx_fifo_errors++; - if (FD_DMA(status)) - dev->stats.rx_over_errors++; - if (FD_Short(status)) - dev->stats.rx_length_errors++; - } - else - { - struct sk_buff *skb; - pkt_len &= 0x3fff; - skb = netdev_alloc_skb(dev, pkt_len + 16); - if (skb == NULL) - { - printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name); - dev->stats.rx_dropped++; - break; - } - skb_reserve(skb, 2); - outw(pbuf+10, ioaddr+READ_PTR); - insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - outw(rx_block, ioaddr+WRITE_PTR); - outw(0, ioaddr+DATAPORT); - outw(0, ioaddr+DATAPORT); - rx_block = rx_next; - } - } while (FD_Done(status) && boguscount--); - lp->rx_ptr = rx_block; -} - -/* - * Hand a packet to the card for transmission - * If we get here, we MUST have already checked - * to make sure there is room in the transmit - * buffer region. - */ - -static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, - unsigned short len) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - - if (LOCKUP16 || lp->width) { - /* Stop the CU so that there is no chance that it - jumps off to a bogus address while we are writing the - pointer to the next transmit packet in 8-bit mode -- - this eliminates the "CU wedged" errors in 8-bit mode. 
- (Zoltan Szilagyi 10-12-96) */ - scb_command(dev, SCB_CUsuspend); - outw(0xFFFF, ioaddr+SIGNAL_CA); - } - - outw(lp->tx_head, ioaddr + WRITE_PTR); - - outw(0x0000, ioaddr + DATAPORT); - outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT); - outw(lp->tx_head+0x08, ioaddr + DATAPORT); - outw(lp->tx_head+0x0e, ioaddr + DATAPORT); - - outw(0x0000, ioaddr + DATAPORT); - outw(0x0000, ioaddr + DATAPORT); - outw(lp->tx_head+0x08, ioaddr + DATAPORT); - - outw(0x8000|len, ioaddr + DATAPORT); - outw(-1, ioaddr + DATAPORT); - outw(lp->tx_head+0x16, ioaddr + DATAPORT); - outw(0, ioaddr + DATAPORT); - - outsw(ioaddr + DATAPORT, buf, (len+1)>>1); - - outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR); - outw(lp->tx_head, ioaddr + DATAPORT); - - dev->trans_start = jiffies; - lp->tx_tail = lp->tx_head; - if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) - lp->tx_head = TX_BUF_START; - else - lp->tx_head += TX_BUF_SIZE; - if (lp->tx_head != lp->tx_reap) - netif_wake_queue(dev); - - if (LOCKUP16 || lp->width) { - /* Restart the CU so that the packet can actually - be transmitted. (Zoltan Szilagyi 10-12-96) */ - scb_command(dev, SCB_CUresume); - outw(0xFFFF, ioaddr+SIGNAL_CA); - } - - dev->stats.tx_packets++; - lp->last_tx = jiffies; -} - -static const struct net_device_ops eexp_netdev_ops = { - .ndo_open = eexp_open, - .ndo_stop = eexp_close, - .ndo_start_xmit = eexp_xmit, - .ndo_set_rx_mode = eexp_set_multicast, - .ndo_tx_timeout = eexp_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* - * Sanity check the suspected EtherExpress card - * Read hardware address, reset card, size memory and initialize buffer - * memory pointers. These are held in netdev_priv(), in case someone has more - * than one card in a machine. - */ - -static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr) -{ - unsigned short hw_addr[3]; - unsigned char buswidth; - unsigned int memory_size; - int i; - unsigned short xsum = 0; - struct net_local *lp = netdev_priv(dev); - - printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr); - - outb(ASIC_RST, ioaddr+EEPROM_Ctrl); - outb(0, ioaddr+EEPROM_Ctrl); - udelay(500); - outb(i586_RST, ioaddr+EEPROM_Ctrl); - - hw_addr[0] = eexp_hw_readeeprom(ioaddr,2); - hw_addr[1] = eexp_hw_readeeprom(ioaddr,3); - hw_addr[2] = eexp_hw_readeeprom(ioaddr,4); - - /* Standard Address or Compaq LTE Address */ - if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) || - (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00)))) - { - printk(" rejected: invalid address %04x%04x%04x\n", - hw_addr[2],hw_addr[1],hw_addr[0]); - return -ENODEV; - } - - /* Calculate the EEPROM checksum. Carry on anyway if it's bad, - * though. - */ - for (i = 0; i < 64; i++) - xsum += eexp_hw_readeeprom(ioaddr, i); - if (xsum != 0xbaba) - printk(" (bad EEPROM xsum 0x%02x)", xsum); - - dev->base_addr = ioaddr; - for ( i=0 ; i<6 ; i++ ) - dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i]; - - { - static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 }; - unsigned short setupval = eexp_hw_readeeprom(ioaddr,0); - - /* Use the IRQ from EEPROM if none was given */ - if (!dev->irq) - dev->irq = irqmap[setupval>>13]; - - if (dev->if_port == 0xff) { - dev->if_port = !(setupval & 0x1000) ? AUI : - eexp_hw_readeeprom(ioaddr,5) & 0x1 ? 
TPE : BNC; - } - - buswidth = !((setupval & 0x400) >> 10); - } - - memset(lp, 0, sizeof(struct net_local)); - spin_lock_init(&lp->lock); - - printk("(IRQ %d, %s connector, %d-bit bus", dev->irq, - eexp_ifmap[dev->if_port], buswidth?8:16); - - if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress")) - return -EBUSY; - - eexp_hw_set_interface(dev); - - release_region(dev->base_addr + 0x300e, 1); - - /* Find out how much RAM we have on the card */ - outw(0, dev->base_addr + WRITE_PTR); - for (i = 0; i < 32768; i++) - outw(0, dev->base_addr + DATAPORT); - - for (memory_size = 0; memory_size < 64; memory_size++) - { - outw(memory_size<<10, dev->base_addr + READ_PTR); - if (inw(dev->base_addr+DATAPORT)) - break; - outw(memory_size<<10, dev->base_addr + WRITE_PTR); - outw(memory_size | 0x5000, dev->base_addr+DATAPORT); - outw(memory_size<<10, dev->base_addr + READ_PTR); - if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000)) - break; - } - - /* Sort out the number of buffers. We may have 16, 32, 48 or 64k - * of RAM to play with. - */ - lp->num_tx_bufs = 4; - lp->rx_buf_end = 0x3ff6; - switch (memory_size) - { - case 64: - lp->rx_buf_end += 0x4000; - case 48: - lp->num_tx_bufs += 4; - lp->rx_buf_end += 0x4000; - case 32: - lp->rx_buf_end += 0x4000; - case 16: - printk(", %dk RAM)\n", memory_size); - break; - default: - printk(") bad memory size (%dk).\n", memory_size); - return -ENODEV; - break; - } - - lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE); - lp->width = buswidth; - - dev->netdev_ops = &eexp_netdev_ops; - dev->watchdog_timeo = 2*HZ; - - return register_netdev(dev); -} - -/* - * Read a word from the EtherExpress on-board serial EEPROM. - * The EEPROM contains 64 words of 16 bits. - */ -static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr, - unsigned char location) -{ - unsigned short cmd = 0x180|(location&0x7f); - unsigned short rval = 0,wval = EC_CS|i586_RST; - int i; - - outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl); - for (i=0x100 ; i ; i>>=1 ) - { - if (cmd&i) - wval |= EC_Wr; - else - wval &= ~EC_Wr; - - outb(wval,ioaddr+EEPROM_Ctrl); - outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - outb(wval,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - } - wval &= ~EC_Wr; - outb(wval,ioaddr+EEPROM_Ctrl); - for (i=0x8000 ; i ; i>>=1 ) - { - outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd) - rval |= i; - outb(wval,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - } - wval &= ~EC_CS; - outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - outb(wval,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - return rval; -} - -/* - * Reap tx buffers and return last transmit status. - * if ==0 then either: - * a) we're not transmitting anything, so why are we here? - * b) we've died. 
- * otherwise, Stat_Busy(return) means we've still got some packets - * to transmit, Stat_Done(return) means our buffers should be empty - * again - */ - -static unsigned short eexp_hw_lasttxstat(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short tx_block = lp->tx_reap; - unsigned short status; - - if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap) - return 0x0000; - - do - { - outw(tx_block & ~31, dev->base_addr + SM_PTR); - status = inw(dev->base_addr + SHADOW(tx_block)); - if (!Stat_Done(status)) - { - lp->tx_link = tx_block; - return status; - } - else - { - lp->last_tx_restart = 0; - dev->stats.collisions += Stat_NoColl(status); - if (!Stat_OK(status)) - { - char *whatsup = NULL; - dev->stats.tx_errors++; - if (Stat_Abort(status)) - dev->stats.tx_aborted_errors++; - if (Stat_TNoCar(status)) { - whatsup = "aborted, no carrier"; - dev->stats.tx_carrier_errors++; - } - if (Stat_TNoCTS(status)) { - whatsup = "aborted, lost CTS"; - dev->stats.tx_carrier_errors++; - } - if (Stat_TNoDMA(status)) { - whatsup = "FIFO underran"; - dev->stats.tx_fifo_errors++; - } - if (Stat_TXColl(status)) { - whatsup = "aborted, too many collisions"; - dev->stats.tx_aborted_errors++; - } - if (whatsup) - printk(KERN_INFO "%s: transmit %s\n", - dev->name, whatsup); - } - else - dev->stats.tx_packets++; - } - if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) - lp->tx_reap = tx_block = TX_BUF_START; - else - lp->tx_reap = tx_block += TX_BUF_SIZE; - netif_wake_queue(dev); - } - while (lp->tx_reap != lp->tx_head); - - lp->tx_link = lp->tx_tail + 0x08; - - return status; -} - -/* - * This should never happen. It is called when some higher routine detects - * that the CU has stopped, to try to restart it from the last packet we knew - * we were working on, or the idle loop if we had finished for the time. - */ - -static void eexp_hw_txrestart(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - - lp->last_tx_restart = lp->tx_link; - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - - { - unsigned short boguscount=50,failcount=5; - while (!scb_status(dev)) - { - if (!--boguscount) - { - if (--failcount) - { - printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev)); - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - boguscount = 100; - } - else - { - printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name); - eexp_hw_init586(dev); - netif_wake_queue(dev); - return; - } - } - } - } -} - -/* - * Writes down the list of transmit buffers into card memory. Each - * entry consists of an 82586 transmit command, followed by a jump - * pointing to itself. When we want to transmit a packet, we write - * the data into the appropriate transmit buffer and then modify the - * preceding jump to point at the new transmit command. This means that - * the 586 command unit is continuously active. 
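/*
 * A small in-memory model of the transmit scheme described above, with
 * array indices standing in for card-memory addresses: each slot's jump
 * initially points back at itself, so the command unit idles there, and
 * queueing a frame patches the previous slot's jump to lead to the newly
 * filled slot. This is only an illustration of the chaining idea, not the
 * on-card layout.
 */
#define N_TX_SLOTS 4

struct tx_slot {
        int filled;     /* does this slot hold a frame?          */
        int jump;       /* index the command unit moves to next  */
};

static void tx_ring_init(struct tx_slot ring[N_TX_SLOTS])
{
        int i;

        for (i = 0; i < N_TX_SLOTS; i++) {
                ring[i].filled = 0;
                ring[i].jump = i;               /* self-pointing: the CU spins here */
        }
}

/* queue a frame into slot 'head'; 'prev' is the slot that was queued before it */
static void tx_ring_queue(struct tx_slot ring[N_TX_SLOTS], int prev, int head)
{
        ring[head].filled = 1;
        ring[head].jump = head;                 /* the new tail spins on itself */
        ring[prev].jump = head;                 /* unblock the CU: the previous jump
                                                 * now leads to the new command */
}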
- */ - -static void eexp_hw_txinit(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short tx_block = TX_BUF_START; - unsigned short curtbuf; - unsigned short ioaddr = dev->base_addr; - - for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ ) - { - outw(tx_block, ioaddr + WRITE_PTR); - - outw(0x0000, ioaddr + DATAPORT); - outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT); - outw(tx_block+0x08, ioaddr + DATAPORT); - outw(tx_block+0x0e, ioaddr + DATAPORT); - - outw(0x0000, ioaddr + DATAPORT); - outw(0x0000, ioaddr + DATAPORT); - outw(tx_block+0x08, ioaddr + DATAPORT); - - outw(0x8000, ioaddr + DATAPORT); - outw(-1, ioaddr + DATAPORT); - outw(tx_block+0x16, ioaddr + DATAPORT); - outw(0x0000, ioaddr + DATAPORT); - - tx_block += TX_BUF_SIZE; - } - lp->tx_head = TX_BUF_START; - lp->tx_reap = TX_BUF_START; - lp->tx_tail = tx_block - TX_BUF_SIZE; - lp->tx_link = lp->tx_tail + 0x08; - lp->rx_buf_start = tx_block; - -} - -/* - * Write the circular list of receive buffer descriptors to card memory. - * The end of the list isn't marked, which means that the 82586 receive - * unit will loop until buffers become available (this avoids it giving us - * "out of resources" messages). - */ - -static void eexp_hw_rxinit(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short rx_block = lp->rx_buf_start; - unsigned short ioaddr = dev->base_addr; - - lp->num_rx_bufs = 0; - lp->rx_first = lp->rx_ptr = rx_block; - do - { - lp->num_rx_bufs++; - - outw(rx_block, ioaddr + WRITE_PTR); - - outw(0, ioaddr + DATAPORT); outw(0, ioaddr+DATAPORT); - outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT); - outw(0xffff, ioaddr+DATAPORT); - - outw(0x0000, ioaddr+DATAPORT); - outw(0xdead, ioaddr+DATAPORT); - outw(0xdead, ioaddr+DATAPORT); - outw(0xdead, ioaddr+DATAPORT); - outw(0xdead, ioaddr+DATAPORT); - outw(0xdead, ioaddr+DATAPORT); - outw(0xdead, ioaddr+DATAPORT); - - outw(0x0000, ioaddr+DATAPORT); - outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT); - outw(rx_block + 0x20, ioaddr+DATAPORT); - outw(0, ioaddr+DATAPORT); - outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT); - - lp->rx_last = rx_block; - rx_block += RX_BUF_SIZE; - } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE); - - - /* Make first Rx frame descriptor point to first Rx buffer - descriptor */ - outw(lp->rx_first + 6, ioaddr+WRITE_PTR); - outw(lp->rx_first + 0x16, ioaddr+DATAPORT); - - /* Close Rx frame descriptor ring */ - outw(lp->rx_last + 4, ioaddr+WRITE_PTR); - outw(lp->rx_first, ioaddr+DATAPORT); - - /* Close Rx buffer descriptor ring */ - outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR); - outw(lp->rx_first + 0x16, ioaddr+DATAPORT); - -} - -/* - * Un-reset the 586, and start the configuration sequence. We don't wait for - * this to finish, but allow the interrupt handler to start the CU and RU for - * us. We can't start the receive/transmission system up before we know that - * the hardware is configured correctly. 
- */ - -static void eexp_hw_init586(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - int i; - -#if NET_DEBUG > 6 - printk("%s: eexp_hw_init586()\n", dev->name); -#endif - - lp->started = 0; - - set_loopback(dev); - - outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ); - - /* Download the startup code */ - outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR); - outw(lp->width?0x0001:0x0000, ioaddr + 0x8006); - outw(0x0000, ioaddr + 0x8008); - outw(0x0000, ioaddr + 0x800a); - outw(0x0000, ioaddr + 0x800c); - outw(0x0000, ioaddr + 0x800e); - - for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) { - int j; - outw(i, ioaddr + SM_PTR); - for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2) - outw(start_code[(i+j)/2], - ioaddr+0x4000+j); - for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2) - outw(start_code[(i+j+16)/2], - ioaddr+0x8000+j); - } - - /* Do we want promiscuous mode or multicast? */ - outw(CONF_PROMISC & ~31, ioaddr+SM_PTR); - i = inw(ioaddr+SHADOW(CONF_PROMISC)); - outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1), - ioaddr+SHADOW(CONF_PROMISC)); - lp->was_promisc = dev->flags & IFF_PROMISC; -#if 0 - eexp_setup_filter(dev); -#endif - - /* Write our hardware address */ - outw(CONF_HWADDR & ~31, ioaddr+SM_PTR); - outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR)); - outw(((unsigned short *)dev->dev_addr)[1], - ioaddr+SHADOW(CONF_HWADDR+2)); - outw(((unsigned short *)dev->dev_addr)[2], - ioaddr+SHADOW(CONF_HWADDR+4)); - - eexp_hw_txinit(dev); - eexp_hw_rxinit(dev); - - outb(0,ioaddr+EEPROM_Ctrl); - mdelay(5); - - scb_command(dev, 0xf000); - outb(0,ioaddr+SIGNAL_CA); - - outw(0, ioaddr+SM_PTR); - - { - unsigned short rboguscount=50,rfailcount=5; - while (inw(ioaddr+0x4000)) - { - if (!--rboguscount) - { - printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n", - dev->name); - scb_command(dev, 0); - outb(0,ioaddr+SIGNAL_CA); - rboguscount = 100; - if (!--rfailcount) - { - printk(KERN_WARNING "%s: i82586 not responding, giving up.\n", - dev->name); - return; - } - } - } - } - - scb_wrcbl(dev, CONF_LINK); - scb_command(dev, 0xf000|SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - - { - unsigned short iboguscount=50,ifailcount=5; - while (!scb_status(dev)) - { - if (!--iboguscount) - { - if (--ifailcount) - { - printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n", - dev->name, scb_status(dev), scb_rdcmd(dev)); - scb_wrcbl(dev, CONF_LINK); - scb_command(dev, 0xf000|SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - iboguscount = 100; - } - else - { - printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name); - return; - } - } - } - } - - clear_loopback(dev); - outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ); - - lp->init_time = jiffies; -#if NET_DEBUG > 6 - printk("%s: leaving eexp_hw_init586()\n", dev->name); -#endif -} - -static void eexp_setup_filter(struct net_device *dev) -{ - struct netdev_hw_addr *ha; - unsigned short ioaddr = dev->base_addr; - int count = netdev_mc_count(dev); - int i; - if (count > 8) { - printk(KERN_INFO "%s: too many multicast addresses (%d)\n", - dev->name, count); - count = 8; - } - - outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); - outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); - i = 0; - netdev_for_each_mc_addr(ha, dev) { - unsigned short *data = (unsigned short *) ha->addr; - - if (i == count) - break; - outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); - outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); - 
outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); - outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2)); - outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR); - outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4)); - i++; - } -} - -/* - * Set or clear the multicast filter for this adaptor. - */ -static void -eexp_set_multicast(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - int kick = 0, i; - if ((dev->flags & IFF_PROMISC) != lp->was_promisc) { - outw(CONF_PROMISC & ~31, ioaddr+SM_PTR); - i = inw(ioaddr+SHADOW(CONF_PROMISC)); - outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1), - ioaddr+SHADOW(CONF_PROMISC)); - lp->was_promisc = dev->flags & IFF_PROMISC; - kick = 1; - } - if (!(dev->flags & IFF_PROMISC)) { - eexp_setup_filter(dev); - if (lp->old_mc_count != netdev_mc_count(dev)) { - kick = 1; - lp->old_mc_count = netdev_mc_count(dev); - } - } - if (kick) { - unsigned long oj; - scb_command(dev, SCB_CUsuspend); - outb(0, ioaddr+SIGNAL_CA); - outb(0, ioaddr+SIGNAL_CA); -#if 0 - printk("%s: waiting for CU to go suspended\n", dev->name); -#endif - oj = jiffies; - while ((SCB_CUstat(scb_status(dev)) == 2) && - (time_before(jiffies, oj + 2000))); - if (SCB_CUstat(scb_status(dev)) == 2) - printk("%s: warning, CU didn't stop\n", dev->name); - lp->started &= ~(STARTED_CU); - scb_wrcbl(dev, CONF_LINK); - scb_command(dev, SCB_CUstart); - outb(0, ioaddr+SIGNAL_CA); - } -} - - -/* - * MODULE stuff - */ - -#ifdef MODULE - -#define EEXP_MAX_CARDS 4 /* max number of cards to support */ - -static struct net_device *dev_eexp[EEXP_MAX_CARDS]; -static int irq[EEXP_MAX_CARDS]; -static int io[EEXP_MAX_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)"); -MODULE_LICENSE("GPL"); - - -/* Ideally the user would give us io=, irq= for every card. If any parameters - * are specified, we verify and then use them. If no parameters are given, we - * autoprobe for one card only. 
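For the parameter path described above, loading would look something like (illustrative I/O and IRQ values only): insmod eexpress io=0x240,0x300 irq=5,11; one comma-separated entry per card, matching the io[] and irq[] arrays declared a few lines earlier. With no parameters the module probes for a single card, which the KERN_NOTICE message above advises against.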
- */ -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) { - dev = alloc_etherdev(sizeof(struct net_local)); - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (io[this_dev] == 0) { - if (this_dev) - break; - printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n"); - } - if (do_express_probe(dev) == 0) { - dev_eexp[this_dev] = dev; - found++; - continue; - } - printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]); - free_netdev(dev); - break; - } - if (found) - return 0; - return -ENXIO; -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) { - struct net_device *dev = dev_eexp[this_dev]; - if (dev) { - unregister_netdev(dev); - free_netdev(dev); - } - } -} -#endif - -/* - * Local Variables: - * c-file-style: "linux" - * tab-width: 8 - * End: - */ diff --git a/drivers/net/ethernet/i825xx/eexpress.h b/drivers/net/ethernet/i825xx/eexpress.h deleted file mode 100644 index dc9c6ea289e9..000000000000 --- a/drivers/net/ethernet/i825xx/eexpress.h +++ /dev/null @@ -1,179 +0,0 @@ -/* - * eexpress.h: Intel EtherExpress16 defines - */ - -/* - * EtherExpress card register addresses - * as offsets from the base IO region (dev->base_addr) - */ - -#define DATAPORT 0x0000 -#define WRITE_PTR 0x0002 -#define READ_PTR 0x0004 -#define SIGNAL_CA 0x0006 -#define SET_IRQ 0x0007 -#define SM_PTR 0x0008 -#define MEM_Dec 0x000a -#define MEM_Ctrl 0x000b -#define MEM_Page_Ctrl 0x000c -#define Config 0x000d -#define EEPROM_Ctrl 0x000e -#define ID_PORT 0x000f -#define MEM_ECtrl 0x000f - -/* - * card register defines - */ - -/* SET_IRQ */ -#define SIRQ_en 0x08 -#define SIRQ_dis 0x00 - -/* EEPROM_Ctrl */ -#define EC_Clk 0x01 -#define EC_CS 0x02 -#define EC_Wr 0x04 -#define EC_Rd 0x08 -#define ASIC_RST 0x40 -#define i586_RST 0x80 - -#define eeprom_delay() { udelay(40); } - -/* - * i82586 Memory Configuration - */ - -/* (System Configuration Pointer) System start up block, read after 586_RST */ -#define SCP_START 0xfff6 - -/* Intermediate System Configuration Pointer */ -#define ISCP_START 0x0000 - -/* System Command Block */ -#define SCB_START 0x0008 - -/* Start of buffer region. Everything before this is used for control - * structures and the CU configuration program. The memory layout is - * determined in eexp_hw_probe(), once we know how much memory is - * available on the card. 
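As a worked example of how that layout is determined, here is a stand-alone version of the sizing switch from eexp_hw_probe() quoted earlier in this hunk; the constants (4 transmit buffers, rx_buf_end of 0x3ff6, 0x4000 per extra 16k bank) are copied from that code, while the program around them is purely illustrative.

#include <stdio.h>

int main(void)
{
        static const int sizes[] = { 16, 32, 48, 64 };
        int i;

        for (i = 0; i < 4; i++) {
                int memory_size = sizes[i];
                int num_tx_bufs = 4;
                int rx_buf_end = 0x3ff6;

                switch (memory_size) {
                case 64:
                        rx_buf_end += 0x4000;
                        /* fall through */
                case 48:
                        num_tx_bufs += 4;
                        rx_buf_end += 0x4000;
                        /* fall through */
                case 32:
                        rx_buf_end += 0x4000;
                        /* fall through */
                case 16:
                        break;
                }
                printf("%2dk card: %d tx buffers, rx_buf_end 0x%04x\n",
                       memory_size, num_tx_bufs, rx_buf_end);
        }
        return 0;
}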
- */ - -#define TX_BUF_START 0x0100 - -#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f) -#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f) - -/* - * SCB defines - */ - -/* these functions take the SCB status word and test the relevant status bit */ -#define SCB_complete(s) (((s) & 0x8000) != 0) -#define SCB_rxdframe(s) (((s) & 0x4000) != 0) -#define SCB_CUdead(s) (((s) & 0x2000) != 0) -#define SCB_RUdead(s) (((s) & 0x1000) != 0) -#define SCB_ack(s) ((s) & 0xf000) - -/* Command unit status: 0=idle, 1=suspended, 2=active */ -#define SCB_CUstat(s) (((s)&0x0300)>>8) - -/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */ -#define SCB_RUstat(s) (((s)&0x0070)>>4) - -/* SCB commands */ -#define SCB_CUnop 0x0000 -#define SCB_CUstart 0x0100 -#define SCB_CUresume 0x0200 -#define SCB_CUsuspend 0x0300 -#define SCB_CUabort 0x0400 -#define SCB_resetchip 0x0080 - -#define SCB_RUnop 0x0000 -#define SCB_RUstart 0x0010 -#define SCB_RUresume 0x0020 -#define SCB_RUsuspend 0x0030 -#define SCB_RUabort 0x0040 - -/* - * Command block defines - */ - -#define Stat_Done(s) (((s) & 0x8000) != 0) -#define Stat_Busy(s) (((s) & 0x4000) != 0) -#define Stat_OK(s) (((s) & 0x2000) != 0) -#define Stat_Abort(s) (((s) & 0x1000) != 0) -#define Stat_STFail (((s) & 0x0800) != 0) -#define Stat_TNoCar(s) (((s) & 0x0400) != 0) -#define Stat_TNoCTS(s) (((s) & 0x0200) != 0) -#define Stat_TNoDMA(s) (((s) & 0x0100) != 0) -#define Stat_TDefer(s) (((s) & 0x0080) != 0) -#define Stat_TColl(s) (((s) & 0x0040) != 0) -#define Stat_TXColl(s) (((s) & 0x0020) != 0) -#define Stat_NoColl(s) ((s) & 0x000f) - -/* Cmd_END will end AFTER the command if this is the first - * command block after an SCB_CUstart, but BEFORE the command - * for all subsequent commands. Best strategy is to place - * Cmd_INT on the last command in the sequence, followed by a - * dummy Cmd_Nop with Cmd_END after this. - */ - -#define Cmd_END 0x8000 -#define Cmd_SUS 0x4000 -#define Cmd_INT 0x2000 - -#define Cmd_Nop 0x0000 -#define Cmd_SetAddr 0x0001 -#define Cmd_Config 0x0002 -#define Cmd_MCast 0x0003 -#define Cmd_Xmit 0x0004 -#define Cmd_TDR 0x0005 -#define Cmd_Dump 0x0006 -#define Cmd_Diag 0x0007 - - -/* - * Frame Descriptor (Receive block) defines - */ - -#define FD_Done(s) (((s) & 0x8000) != 0) -#define FD_Busy(s) (((s) & 0x4000) != 0) -#define FD_OK(s) (((s) & 0x2000) != 0) - -#define FD_CRC(s) (((s) & 0x0800) != 0) -#define FD_Align(s) (((s) & 0x0400) != 0) -#define FD_Resrc(s) (((s) & 0x0200) != 0) -#define FD_DMA(s) (((s) & 0x0100) != 0) -#define FD_Short(s) (((s) & 0x0080) != 0) -#define FD_NoEOF(s) (((s) & 0x0040) != 0) - -struct rfd_header { - volatile unsigned long flags; - volatile unsigned short link; - volatile unsigned short rbd_offset; - volatile unsigned short dstaddr1; - volatile unsigned short dstaddr2; - volatile unsigned short dstaddr3; - volatile unsigned short srcaddr1; - volatile unsigned short srcaddr2; - volatile unsigned short srcaddr3; - volatile unsigned short length; - - /* This is actually a Receive Buffer Descriptor. The way we - * arrange memory means that an RBD always follows the RFD that - * points to it, so they might as well be in the same structure. 
- */ - volatile unsigned short actual_count; - volatile unsigned short next_rbd; - volatile unsigned short buf_addr1; - volatile unsigned short buf_addr2; - volatile unsigned short size; -}; - -/* Returned data from the Time Domain Reflectometer */ - -#define TDR_LINKOK (1<<15) -#define TDR_XCVRPROBLEM (1<<14) -#define TDR_OPEN (1<<13) -#define TDR_SHORT (1<<12) -#define TDR_TIME 0x7ff diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c deleted file mode 100644 index 3735bfa53600..000000000000 --- a/drivers/net/ethernet/i825xx/lp486e.c +++ /dev/null @@ -1,1337 +0,0 @@ -/* Intel Professional Workstation/panther ethernet driver */ -/* lp486e.c: A panther 82596 ethernet driver for linux. */ -/* - History and copyrights: - - Driver skeleton - Written 1993 by Donald Becker. - Copyright 1993 United States Government as represented by the Director, - National Security Agency. This software may only be used and - distributed according to the terms of the GNU General Public License - as modified by SRC, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Apricot - Written 1994 by Mark Evans. - This driver is for the Apricot 82596 bus-master interface - - Modularised 12/94 Mark Evans - - Professional Workstation - Derived from apricot.c by Ard van Breemen - <ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org> - - Credits: - Thanks to Murphy Software BV for letting me write this in their time. - Well, actually, I get paid doing this... - (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for - more information on the Professional Workstation) - - Present version - aeb@cwi.nl -*/ -/* - There are currently two motherboards that I know of in the - professional workstation. The only one that I know is the - intel panther motherboard. -- ard -*/ -/* -The pws is equipped with an intel 82596. This is a very intelligent controller -which runs its own micro-code. Communication with the hostprocessor is done -through linked lists of commands and buffers in the hostprocessors memory. -A complete description of the 82596 is available from intel. Search for -a file called "29021806.pdf". It is a complete description of the chip itself. -To use it for the pws some additions are needed regarding generation of -the PORT and CA signal, and the interrupt glue needed for a pc. -I/O map: -PORT SIZE ACTION MEANING -0xCB0 2 WRITE Lower 16 bits for PORT command -0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command -0xCB4 1 WRITE Generation of CA signal -0xCB8 1 WRITE Clear interrupt glue -All other communication is through memory! 
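The two-cycle PORT write in the I/O map above (and in the PORT() helper further down this driver) splits a 32-bit value into two 16-bit writes: the low half, with the command code OR-ed in, goes to 0xCB0, and writing the high half to 0xCB2 is what actually issues the command. A user-space sketch, with io_write16() standing in for outw() and a made-up SCP address:

#include <stdint.h>
#include <stdio.h>

#define PORT_ALTSCP 0x02        /* "alternate SCP address", as defined later in this driver */

static void io_write16(uint16_t port, uint16_t val)
{
        printf("outw(0x%04x) -> port 0x%03x\n", val, port);     /* stand-in for outw() */
}

static void port_cmd(uint32_t addr, uint16_t cmd)
{
        if (addr & 0xf)
                printf("PORT address must be 16-byte aligned\n");
        io_write16(0xCB0, (uint16_t)((addr & 0xffff) | cmd));   /* low word + command */
        io_write16(0xCB2, (uint16_t)(addr >> 16));              /* high word, issues it */
}

int main(void)
{
        port_cmd(0x00100000, PORT_ALTSCP);      /* sample 16-byte aligned address */
        return 0;
}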
-*/ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/bitops.h> - -#include <asm/io.h> -#include <asm/dma.h> - -#define DRV_NAME "lp486e" - -/* debug print flags */ -#define LOG_SRCDST 0x80000000 -#define LOG_STATINT 0x40000000 -#define LOG_STARTINT 0x20000000 - -#define i596_debug debug - -static int i596_debug = 0; - -static const char * const medianame[] = { - "10baseT", "AUI", - "10baseT-FD", "AUI-FD", -}; - -#define LP486E_TOTAL_SIZE 16 - -#define I596_NULL (0xffffffff) - -#define CMD_EOL 0x8000 /* The last command of the list, stop. */ -#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ -#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ - -#define CMD_FLEX 0x0008 /* Enable flexible memory model */ - -enum commands { - CmdNOP = 0, - CmdIASetup = 1, - CmdConfigure = 2, - CmdMulticastList = 3, - CmdTx = 4, - CmdTDR = 5, - CmdDump = 6, - CmdDiagnose = 7 -}; - -#if 0 -static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList", - "Tx", "TDR", "Dump", "Diagnose" }; -#endif - -/* Status word bits */ -#define STAT_CX 0x8000 /* The CU finished executing a command - with the Interrupt bit set */ -#define STAT_FR 0x4000 /* The RU finished receiving a frame */ -#define STAT_CNA 0x2000 /* The CU left the active state */ -#define STAT_RNR 0x1000 /* The RU left the active state */ -#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR) -#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended, - 2: active, 3-7: unused */ -#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended, - 2: no resources, 4: ready, - 10: no resources due to no more RBDs, - 12: no more RBDs, other: unused */ -#define STAT_T 0x0008 /* Bus throttle timers loaded */ -#define STAT_ZERO 0x0807 /* Always zero */ - -#if 0 -static char *CUstates[8] = { - "idle", "suspended", "active", 0, 0, 0, 0, 0 -}; -static char *RUstates[16] = { - "idle", "suspended", "no resources", 0, "ready", 0, 0, 0, - 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0 -}; - -static void -i596_out_status(int status) { - int bad = 0; - char *s; - - printk("status %4.4x:", status); - if (status == 0xffff) - printk(" strange..\n"); - else { - if (status & STAT_CX) - printk(" CU done"); - if (status & STAT_CNA) - printk(" CU stopped"); - if (status & STAT_FR) - printk(" got a frame"); - if (status & STAT_RNR) - printk(" RU stopped"); - if (status & STAT_T) - printk(" throttled"); - if (status & STAT_ZERO) - bad = 1; - s = CUstates[(status & STAT_CUS) >> 8]; - if (!s) - bad = 1; - else - printk(" CU(%s)", s); - s = RUstates[(status & STAT_RUS) >> 4]; - if (!s) - bad = 1; - else - printk(" RU(%s)", s); - if (bad) - printk(" bad status"); - printk("\n"); - } -} -#endif - -/* Command word bits */ -#define ACK_CX 0x8000 -#define ACK_FR 0x4000 -#define ACK_CNA 0x2000 -#define ACK_RNR 0x1000 - -#define CUC_START 0x0100 -#define CUC_RESUME 0x0200 -#define CUC_SUSPEND 0x0300 -#define CUC_ABORT 0x0400 - -#define RX_START 0x0010 -#define RX_RESUME 0x0020 -#define RX_SUSPEND 0x0030 -#define RX_ABORT 0x0040 - -typedef u32 phys_addr; - -static inline phys_addr -va_to_pa(void *x) { - return x ? virt_to_bus(x) : I596_NULL; -} - -static inline void * -pa_to_va(phys_addr x) { - return (x == I596_NULL) ? 
NULL : bus_to_virt(x); -} - -/* status bits for cmd */ -#define CMD_STAT_C 0x8000 /* CU command complete */ -#define CMD_STAT_B 0x4000 /* CU command in progress */ -#define CMD_STAT_OK 0x2000 /* CU command completed without errors */ -#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */ - -struct i596_cmd { /* 8 bytes */ - unsigned short status; - unsigned short command; - phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */ -}; - -#define EOF 0x8000 -#define SIZE_MASK 0x3fff - -struct i596_tbd { - unsigned short size; - unsigned short pad; - phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */ - phys_addr pa_data; /* va_to_pa(char *data) */ - struct sk_buff *skb; -}; - -struct tx_cmd { - struct i596_cmd cmd; - phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */ - unsigned short size; - unsigned short pad; -}; - -/* status bits for rfd */ -#define RFD_STAT_C 0x8000 /* Frame reception complete */ -#define RFD_STAT_B 0x4000 /* Frame reception in progress */ -#define RFD_STAT_OK 0x2000 /* Frame received without errors */ -#define RFD_STATUS 0x1fff -#define RFD_LENGTH_ERR 0x1000 -#define RFD_CRC_ERR 0x0800 -#define RFD_ALIGN_ERR 0x0400 -#define RFD_NOBUFS_ERR 0x0200 -#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */ -#define RFD_SHORT_FRAME_ERR 0x0080 -#define RFD_NOEOP_ERR 0x0040 -#define RFD_TRUNC_ERR 0x0020 -#define RFD_MULTICAST 0x0002 /* 0: destination had our address - 1: destination was broadcast/multicast */ -#define RFD_COLLISION 0x0001 - -/* receive frame descriptor */ -struct i596_rfd { - unsigned short stat; - unsigned short cmd; - phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */ - phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */ - unsigned short count; - unsigned short size; - char data[1532]; -}; - -#define RBD_EL 0x8000 -#define RBD_P 0x4000 -#define RBD_SIZEMASK 0x3fff -#define RBD_EOF 0x8000 -#define RBD_F 0x4000 - -/* receive buffer descriptor */ -struct i596_rbd { - unsigned short size; - unsigned short pad; - phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */ - phys_addr pa_data; /* va_to_pa(char *data) */ - phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */ - - /* Driver private part */ - struct sk_buff *skb; -}; - -#define RX_RING_SIZE 64 -#define RX_SKBSIZE (ETH_FRAME_LEN+10) -#define RX_RBD_SIZE 32 - -/* System Control Block - 40 bytes */ -struct i596_scb { - u16 status; /* 0 */ - u16 command; /* 2 */ - phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */ - phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */ - u32 crc_err; /* 12 */ - u32 align_err; /* 16 */ - u32 resource_err; /* 20 */ - u32 over_err; /* 24 */ - u32 rcvdt_err; /* 28 */ - u32 short_err; /* 32 */ - u16 t_on; /* 36 */ - u16 t_off; /* 38 */ -}; - -/* Intermediate System Configuration Pointer - 8 bytes */ -struct i596_iscp { - u32 busy; /* 0 */ - phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */ -}; - -/* System Configuration Pointer - 12 bytes */ -struct i596_scp { - u32 sysbus; /* 0 */ - u32 pad; /* 4 */ - phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */ -}; - -/* Selftest and dump results - needs 16-byte alignment */ -/* - * The size of the dump area is 304 bytes. When the dump is executed - * by the Port command an extra word will be appended to the dump area. - * The extra word is a copy of the Dump status word (containing the - * C, B, OK bits). 
[I find 0xa006, with a0 for C+OK and 6 for dump] - */ -struct i596_dump { - u16 dump[153]; /* (304 = 130h) + 2 bytes */ -}; - -struct i596_private { /* aligned to a 16-byte boundary */ - struct i596_scp scp; /* 0 - needs 16-byte alignment */ - struct i596_iscp iscp; /* 12 */ - struct i596_scb scb; /* 20 */ - u32 dummy; /* 60 */ - struct i596_dump dump; /* 64 - needs 16-byte alignment */ - - struct i596_cmd set_add; - char eth_addr[8]; /* directly follows set_add */ - - struct i596_cmd set_conf; - char i596_config[16]; /* directly follows set_conf */ - - struct i596_cmd tdr; - unsigned long tdr_stat; /* directly follows tdr */ - - int last_restart; - struct i596_rbd *rbd_list; - struct i596_rbd *rbd_tail; - struct i596_rfd *rx_tail; - struct i596_cmd *cmd_tail; - struct i596_cmd *cmd_head; - int cmd_backlog; - unsigned long last_cmd; - spinlock_t cmd_lock; -}; - -static char init_setup[14] = { - 0x8E, /* length 14 bytes, prefetch on */ - 0xC8, /* default: fifo to 8, monitor off */ - 0x40, /* default: don't save bad frames (apricot.c had 0x80) */ - 0x2E, /* (default is 0x26) - No source address insertion, 8 byte preamble */ - 0x00, /* default priority and backoff */ - 0x60, /* default interframe spacing */ - 0x00, /* default slot time LSB */ - 0xf2, /* default slot time and nr of retries */ - 0x00, /* default various bits - (0: promiscuous mode, 1: broadcast disable, - 2: encoding mode, 3: transmit on no CRS, - 4: no CRC insertion, 5: CRC type, - 6: bit stuffing, 7: padding) */ - 0x00, /* default carrier sense and collision detect */ - 0x40, /* default minimum frame length */ - 0xff, /* (default is 0xff, and that is what apricot.c has; - elp486.c has 0xfb: Enable crc append in memory.) */ - 0x00, /* default: not full duplex */ - 0x7f /* (default is 0x3f) multi IA */ -}; - -static int i596_open(struct net_device *dev); -static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t i596_interrupt(int irq, void *dev_id); -static int i596_close(struct net_device *dev); -static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); -static void print_eth(char *); -static void set_multicast_list(struct net_device *dev); -static void i596_tx_timeout(struct net_device *dev); - -static int -i596_timeout(struct net_device *dev, char *msg, int ct) { - struct i596_private *lp; - int boguscnt = ct; - - lp = netdev_priv(dev); - while (lp->scb.command) { - if (--boguscnt == 0) { - printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n", - dev->name, msg, - lp->scb.status, lp->scb.command); - return 1; - } - udelay(5); - barrier(); - } - return 0; -} - -static inline int -init_rx_bufs(struct net_device *dev, int num) { - struct i596_private *lp; - struct i596_rfd *rfd; - int i; - // struct i596_rbd *rbd; - - lp = netdev_priv(dev); - lp->scb.pa_rfd = I596_NULL; - - for (i = 0; i < num; i++) { - rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL); - if (rfd == NULL) - break; - - rfd->stat = 0; - rfd->pa_rbd = I596_NULL; - rfd->count = 0; - rfd->size = 1532; - if (i == 0) { - rfd->cmd = CMD_EOL; - lp->rx_tail = rfd; - } else { - rfd->cmd = 0; - } - rfd->pa_next = lp->scb.pa_rfd; - lp->scb.pa_rfd = va_to_pa(rfd); - lp->rx_tail->pa_next = lp->scb.pa_rfd; - } - -#if 0 - for (i = 0; i<RX_RBD_SIZE; i++) { - rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL); - if (rbd) { - rbd->pad = 0; - rbd->count = 0; - rbd->skb = dev_alloc_skb(RX_SKBSIZE); - if (!rbd->skb) { - printk("dev_alloc_skb failed"); - } - rbd->next = rfd->rbd; - if (i) { - rfd->rbd->prev = rbd; - rbd->size = 
RX_SKBSIZE; - } else { - rbd->size = (RX_SKBSIZE | RBD_EL); - lp->rbd_tail = rbd; - } - - rfd->rbd = rbd; - } - } - lp->rbd_tail->next = rfd->rbd; -#endif - return i; -} - -static inline void -remove_rx_bufs(struct net_device *dev) { - struct i596_private *lp; - struct i596_rfd *rfd; - - lp = netdev_priv(dev); - lp->rx_tail->pa_next = I596_NULL; - - do { - rfd = pa_to_va(lp->scb.pa_rfd); - lp->scb.pa_rfd = rfd->pa_next; - kfree(rfd); - } while (rfd != lp->rx_tail); - - lp->rx_tail = NULL; - -#if 0 - for (lp->rbd_list) { - } -#endif -} - -#define PORT_RESET 0x00 /* reset 82596 */ -#define PORT_SELFTEST 0x01 /* selftest */ -#define PORT_ALTSCP 0x02 /* alternate SCB address */ -#define PORT_DUMP 0x03 /* dump */ - -#define IOADDR 0xcb0 /* real constant */ -#define IRQ 10 /* default IRQ - can be changed by ECU */ - -/* The 82596 requires two 16-bit write cycles for a port command */ -static inline void -PORT(phys_addr a, unsigned int cmd) { - if (a & 0xf) - printk("lp486e.c: PORT: address not aligned\n"); - outw(((a & 0xffff) | cmd), IOADDR); - outw(((a>>16) & 0xffff), IOADDR+2); -} - -static inline void -CA(void) { - outb(0, IOADDR+4); - udelay(8); -} - -static inline void -CLEAR_INT(void) { - outb(0, IOADDR+8); -} - -#if 0 -/* selftest or dump */ -static void -i596_port_do(struct net_device *dev, int portcmd, char *cmdname) { - struct i596_private *lp = netdev_priv(dev); - u16 *outp; - int i, m; - - memset((void *)&(lp->dump), 0, sizeof(struct i596_dump)); - outp = &(lp->dump.dump[0]); - - PORT(va_to_pa(outp), portcmd); - mdelay(30); /* random, unmotivated */ - - printk("lp486e i82596 %s result:\n", cmdname); - for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--) - ; - for (i = 0; i < m; i++) { - printk(" %04x", lp->dump.dump[i]); - if (i%8 == 7) - printk("\n"); - } - printk("\n"); -} -#endif - -static int -i596_scp_setup(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - int boguscnt; - - /* Setup SCP, ISCP, SCB */ - /* - * sysbus bits: - * only a single byte is significant - here 0x44 - * 0x80: big endian mode (details depend on stepping) - * 0x40: 1 - * 0x20: interrupt pin is active low - * 0x10: lock function disabled - * 0x08: external triggering of bus throttle timers - * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode - * 0x01: unused - */ - lp->scp.sysbus = 0x00440000; /* linear mode */ - lp->scp.pad = 0; /* must be zero */ - lp->scp.pa_iscp = va_to_pa(&(lp->iscp)); - - /* - * The CPU sets the ISCP to 1 before it gives the first CA() - */ - lp->iscp.busy = 0x0001; - lp->iscp.pa_scb = va_to_pa(&(lp->scb)); - - lp->scb.command = 0; - lp->scb.status = 0; - lp->scb.pa_cmd = I596_NULL; - /* lp->scb.pa_rfd has been initialised already */ - - lp->last_cmd = jiffies; - lp->cmd_backlog = 0; - lp->cmd_head = NULL; - - /* - * Reset the 82596. - * We need to wait 10 systemclock cycles, and - * 5 serial clock cycles. - */ - PORT(0, PORT_RESET); /* address part ignored */ - udelay(100); - - /* - * Before the CA signal is asserted, the default SCP address - * (0x00fffff4) can be changed to a 16-byte aligned value - */ - PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */ - - /* - * The initialization procedure begins when a - * Channel Attention signal is asserted after a reset. - */ - - CA(); - - /* - * The ISCP busy is cleared by the 82596 after the SCB address is read. - */ - boguscnt = 100; - while (lp->iscp.busy) { - if (--boguscnt == 0) { - /* No i82596 present? 
*/ - printk("%s: i82596 initialization timed out\n", - dev->name); - return 1; - } - udelay(5); - barrier(); - } - /* I find here boguscnt==100, so no delay was required. */ - - return 0; -} - -static int -init_i596(struct net_device *dev) { - struct i596_private *lp; - - if (i596_scp_setup(dev)) - return 1; - - lp = netdev_priv(dev); - lp->scb.command = 0; - - memcpy ((void *)lp->i596_config, init_setup, 14); - lp->set_conf.command = CmdConfigure; - i596_add_cmd(dev, (void *)&lp->set_conf); - - memcpy ((void *)lp->eth_addr, dev->dev_addr, 6); - lp->set_add.command = CmdIASetup; - i596_add_cmd(dev, &lp->set_add); - - lp->tdr.command = CmdTDR; - i596_add_cmd(dev, &lp->tdr); - - if (lp->scb.command && i596_timeout(dev, "i82596 init", 200)) - return 1; - - lp->scb.command = RX_START; - CA(); - - barrier(); - - if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100)) - return 1; - - return 0; -} - -/* Receive a single frame */ -static inline int -i596_rx_one(struct net_device *dev, struct i596_private *lp, - struct i596_rfd *rfd, int *frames) { - - if (rfd->stat & RFD_STAT_OK) { - /* a good frame */ - int pkt_len = (rfd->count & 0x3fff); - struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len); - - (*frames)++; - - if (rfd->cmd & CMD_EOL) - printk("Received on EOL\n"); - - if (skb == NULL) { - printk ("%s: i596_rx Memory squeeze, " - "dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - return 1; - } - - memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); - - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - } else { -#if 0 - printk("Frame reception error status %04x\n", - rfd->stat); -#endif - dev->stats.rx_errors++; - if (rfd->stat & RFD_COLLISION) - dev->stats.collisions++; - if (rfd->stat & RFD_SHORT_FRAME_ERR) - dev->stats.rx_length_errors++; - if (rfd->stat & RFD_DMA_ERR) - dev->stats.rx_over_errors++; - if (rfd->stat & RFD_NOBUFS_ERR) - dev->stats.rx_fifo_errors++; - if (rfd->stat & RFD_ALIGN_ERR) - dev->stats.rx_frame_errors++; - if (rfd->stat & RFD_CRC_ERR) - dev->stats.rx_crc_errors++; - if (rfd->stat & RFD_LENGTH_ERR) - dev->stats.rx_length_errors++; - } - rfd->stat = rfd->count = 0; - return 0; -} - -static int -i596_rx(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - struct i596_rfd *rfd; - int frames = 0; - - while (1) { - rfd = pa_to_va(lp->scb.pa_rfd); - if (!rfd) { - printk(KERN_ERR "i596_rx: NULL rfd?\n"); - return 0; - } -#if 1 - if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B))) - printk("SF:%p-%04x\n", rfd, rfd->stat); -#endif - if (!(rfd->stat & RFD_STAT_C)) - break; /* next one not ready */ - if (i596_rx_one(dev, lp, rfd, &frames)) - break; /* out of memory */ - rfd->cmd = CMD_EOL; - lp->rx_tail->cmd = 0; - lp->rx_tail = rfd; - lp->scb.pa_rfd = rfd->pa_next; - barrier(); - } - - return frames; -} - -static void -i596_cleanup_cmd(struct net_device *dev) { - struct i596_private *lp; - struct i596_cmd *cmd; - - lp = netdev_priv(dev); - while (lp->cmd_head) { - cmd = lp->cmd_head; - - lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); - lp->cmd_backlog--; - - switch ((cmd->command) & 0x7) { - case CmdTx: { - struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd; - struct i596_tbd * tx_cmd_tbd; - tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd); - - dev_kfree_skb_any(tx_cmd_tbd->skb); - - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - - cmd->pa_next = I596_NULL; - kfree((unsigned char *)tx_cmd); - netif_wake_queue(dev); - break; - } - case CmdMulticastList: { - // unsigned short count = *((unsigned 
short *) (ptr + 1)); - - cmd->pa_next = I596_NULL; - kfree((unsigned char *)cmd); - break; - } - default: { - cmd->pa_next = I596_NULL; - break; - } - } - barrier(); - } - - if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100)) - ; - - lp->scb.pa_cmd = va_to_pa(lp->cmd_head); -} - -static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) { - - if (lp->scb.command && i596_timeout(dev, "i596_reset", 100)) - ; - - netif_stop_queue(dev); - - lp->scb.command = CUC_ABORT | RX_ABORT; - CA(); - barrier(); - - /* wait for shutdown */ - if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400)) - ; - - i596_cleanup_cmd(dev); - i596_rx(dev); - - netif_start_queue(dev); - /*dev_kfree_skb(skb, FREE_WRITE);*/ - init_i596(dev); -} - -static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) { - struct i596_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - - cmd->status = 0; - cmd->command |= (CMD_EOL | CMD_INTR); - cmd->pa_next = I596_NULL; - - spin_lock_irqsave(&lp->cmd_lock, flags); - - if (lp->cmd_head) { - lp->cmd_tail->pa_next = va_to_pa(cmd); - } else { - lp->cmd_head = cmd; - if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100)) - ; - lp->scb.pa_cmd = va_to_pa(cmd); - lp->scb.command = CUC_START; - CA(); - } - lp->cmd_tail = cmd; - lp->cmd_backlog++; - - lp->cmd_head = pa_to_va(lp->scb.pa_cmd); - spin_unlock_irqrestore(&lp->cmd_lock, flags); - - if (lp->cmd_backlog > 16) { - int tickssofar = jiffies - lp->last_cmd; - if (tickssofar < HZ/4) - return; - - printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name); - i596_reset(dev, lp, ioaddr); - } -} - -static int i596_open(struct net_device *dev) -{ - int i; - - i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev); - if (i) { - printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); - return i; - } - - if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE) - printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i); - - if (i < 4) { - free_irq(dev->irq, dev); - return -EAGAIN; - } - netif_start_queue(dev); - init_i596(dev); - return 0; /* Always succeed */ -} - -static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { - struct tx_cmd *tx_cmd; - short length; - - length = skb->len; - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - - tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); - if (tx_cmd == NULL) { - printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); - dev->stats.tx_dropped++; - dev_kfree_skb (skb); - } else { - struct i596_tbd *tx_cmd_tbd; - tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1); - tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd); - tx_cmd_tbd->pa_next = I596_NULL; - - tx_cmd->cmd.command = (CMD_FLEX | CmdTx); - - tx_cmd->pad = 0; - tx_cmd->size = 0; - tx_cmd_tbd->pad = 0; - tx_cmd_tbd->size = (EOF | length); - - tx_cmd_tbd->pa_data = va_to_pa (skb->data); - tx_cmd_tbd->skb = skb; - - if (i596_debug & LOG_SRCDST) - print_eth (skb->data); - - i596_add_cmd (dev, (struct i596_cmd *) tx_cmd); - - dev->stats.tx_packets++; - } - - return NETDEV_TX_OK; -} - -static void -i596_tx_timeout (struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* Transmitter timeout, serious problems. 
*/ - printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name); - dev->stats.tx_errors++; - - /* Try to restart the adaptor */ - if (lp->last_restart == dev->stats.tx_packets) { - printk ("Resetting board.\n"); - - /* Shutdown and restart */ - i596_reset (dev, lp, ioaddr); - } else { - /* Issue a channel attention signal */ - printk ("Kicking board.\n"); - lp->scb.command = (CUC_START | RX_START); - CA(); - lp->last_restart = dev->stats.tx_packets; - } - netif_wake_queue(dev); -} - -static void print_eth(char *add) -{ - int i; - - printk ("Dest "); - for (i = 0; i < 6; i++) - printk(" %2.2X", (unsigned char) add[i]); - printk ("\n"); - - printk ("Source"); - for (i = 0; i < 6; i++) - printk(" %2.2X", (unsigned char) add[i+6]); - printk ("\n"); - - printk ("type %2.2X%2.2X\n", - (unsigned char) add[12], (unsigned char) add[13]); -} - -static const struct net_device_ops i596_netdev_ops = { - .ndo_open = i596_open, - .ndo_stop = i596_close, - .ndo_start_xmit = i596_start_xmit, - .ndo_set_rx_mode = set_multicast_list, - .ndo_tx_timeout = i596_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init lp486e_probe(struct net_device *dev) { - struct i596_private *lp; - unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 }; - unsigned char *bios; - int i, j; - int ret = -ENOMEM; - static int probed; - - if (probed) - return -ENODEV; - probed++; - - if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) { - printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR); - return -EBUSY; - } - - lp = netdev_priv(dev); - spin_lock_init(&lp->cmd_lock); - - /* - * Do we really have this thing? - */ - if (i596_scp_setup(dev)) { - ret = -ENODEV; - goto err_out_kfree; - } - - dev->base_addr = IOADDR; - dev->irq = IRQ; - - - /* - * How do we find the ethernet address? I don't know. - * One possibility is to look at the EISA configuration area - * [0xe8000-0xe9fff]. This contains the ethernet address - * but not at a fixed address - things depend on setup options. - * - * If we find no address, or the wrong address, use - * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6 - * with the value found in the BIOS setup. - */ - bios = bus_to_virt(0xe8000); - for (j = 0; j < 0x2000; j++) { - if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) { - printk("%s: maybe address at BIOS 0x%x:", - dev->name, 0xe8000+j); - for (i = 0; i < 6; i++) { - eth_addr[i] = bios[i+j]; - printk(" %2.2X", eth_addr[i]); - } - printk("\n"); - } - } - - printk("%s: lp486e 82596 at %#3lx, IRQ %d,", - dev->name, dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]); - printk("\n"); - - /* The LP486E-specific entries in the device structure. 
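The BIOS scan a few lines above (looking for a station address that starts with 00:aa:00 somewhere in the 8K EISA configuration area) is easy to model outside the kernel. find_candidates() and the test buffer are illustrative stand-ins for the real loop over bus_to_virt(0xe8000):

#include <stdio.h>

static void find_candidates(const unsigned char *area, int len)
{
        int i, j;

        for (j = 0; j + 6 <= len; j++) {
                if (area[j] == 0x00 && area[j + 1] == 0xaa && area[j + 2] == 0x00) {
                        printf("possible address at offset 0x%x:", j);
                        for (i = 0; i < 6; i++)
                                printf(" %02x", area[j + i]);
                        printf("\n");
                }
        }
}

int main(void)
{
        static const unsigned char fake_bios[32] = {
                0x12, 0x34,
                0x00, 0xaa, 0x00, 0x11, 0x22, 0x33,     /* looks like a station address */
                0xff,
        };

        find_candidates(fake_bios, sizeof(fake_bios));
        return 0;
}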
*/ - dev->netdev_ops = &i596_netdev_ops; - dev->watchdog_timeo = 5*HZ; - -#if 0 - /* selftest reports 0x320925ae - don't know what that means */ - i596_port_do(dev, PORT_SELFTEST, "selftest"); - i596_port_do(dev, PORT_DUMP, "dump"); -#endif - return 0; - -err_out_kfree: - release_region(IOADDR, LP486E_TOTAL_SIZE); - return ret; -} - -static inline void -i596_handle_CU_completion(struct net_device *dev, - struct i596_private *lp, - unsigned short status, - unsigned short *ack_cmdp) { - struct i596_cmd *cmd; - int frames_out = 0; - int commands_done = 0; - int cmd_val; - unsigned long flags; - - spin_lock_irqsave(&lp->cmd_lock, flags); - cmd = lp->cmd_head; - - while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) { - cmd = lp->cmd_head; - - lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); - lp->cmd_backlog--; - - commands_done++; - cmd_val = cmd->command & 0x7; -#if 0 - printk("finished CU %s command (%d)\n", - CUcmdnames[cmd_val], cmd_val); -#endif - switch (cmd_val) { - case CmdTx: - { - struct tx_cmd *tx_cmd; - struct i596_tbd *tx_cmd_tbd; - - tx_cmd = (struct tx_cmd *) cmd; - tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd); - - frames_out++; - if (cmd->status & CMD_STAT_OK) { - if (i596_debug) - print_eth(pa_to_va(tx_cmd_tbd->pa_data)); - } else { - dev->stats.tx_errors++; - if (i596_debug) - printk("transmission failure:%04x\n", - cmd->status); - if (cmd->status & 0x0020) - dev->stats.collisions++; - if (!(cmd->status & 0x0040)) - dev->stats.tx_heartbeat_errors++; - if (cmd->status & 0x0400) - dev->stats.tx_carrier_errors++; - if (cmd->status & 0x0800) - dev->stats.collisions++; - if (cmd->status & 0x1000) - dev->stats.tx_aborted_errors++; - } - dev_kfree_skb_irq(tx_cmd_tbd->skb); - - cmd->pa_next = I596_NULL; - kfree((unsigned char *)tx_cmd); - netif_wake_queue(dev); - break; - } - - case CmdMulticastList: - cmd->pa_next = I596_NULL; - kfree((unsigned char *)cmd); - break; - - case CmdTDR: - { - unsigned long status = *((unsigned long *) (cmd + 1)); - if (status & 0x8000) { - if (i596_debug) - printk("%s: link ok.\n", dev->name); - } else { - if (status & 0x4000) - printk("%s: Transceiver problem.\n", - dev->name); - if (status & 0x2000) - printk("%s: Termination problem.\n", - dev->name); - if (status & 0x1000) - printk("%s: Short circuit.\n", - dev->name); - printk("%s: Time %ld.\n", - dev->name, status & 0x07ff); - } - } - default: - cmd->pa_next = I596_NULL; - lp->last_cmd = jiffies; - - } - barrier(); - } - - cmd = lp->cmd_head; - while (cmd && (cmd != lp->cmd_tail)) { - cmd->command &= 0x1fff; - cmd = pa_to_va(cmd->pa_next); - barrier(); - } - - if (lp->cmd_head) - *ack_cmdp |= CUC_START; - lp->scb.pa_cmd = va_to_pa(lp->cmd_head); - spin_unlock_irqrestore(&lp->cmd_lock, flags); -} - -static irqreturn_t -i596_interrupt(int irq, void *dev_instance) -{ - struct net_device *dev = dev_instance; - struct i596_private *lp = netdev_priv(dev); - unsigned short status, ack_cmd = 0; - int frames_in = 0; - - /* - * The 82596 examines the command, performs the required action, - * and then clears the SCB command word. - */ - if (lp->scb.command && i596_timeout(dev, "interrupt", 40)) - ; - - /* - * The status word indicates the status of the 82596. - * It is modified only by the 82596. - * - * [So, we must not clear it. I find often status 0xffff, - * which is not one of the values allowed by the docs.] 
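A small stand-alone decoder makes the status handling below easier to follow. The STAT_* masks are the ones defined near the top of this driver; decode_status() itself is illustrative. Note how 0xffff sets bits that must always read back as zero, which is why the handler treats it as bogus:

#include <stdio.h>

#define STAT_CX   0x8000        /* CU finished a command with its interrupt bit set */
#define STAT_FR   0x4000        /* RU finished receiving a frame */
#define STAT_CNA  0x2000        /* CU left the active state */
#define STAT_RNR  0x1000        /* RU left the active state */
#define STAT_CUS  0x0700        /* CU state field */
#define STAT_RUS  0x00f0        /* RU state field */
#define STAT_ZERO 0x0807        /* must always read back as zero */

static void decode_status(unsigned int status)
{
        printf("status %04x:%s%s%s%s CU=%u RU=%u%s\n", status,
               (status & STAT_CX)  ? " cmd-done"   : "",
               (status & STAT_FR)  ? " frame-rcvd" : "",
               (status & STAT_CNA) ? " CU-stopped" : "",
               (status & STAT_RNR) ? " RU-stopped" : "",
               (status & STAT_CUS) >> 8,
               (status & STAT_RUS) >> 4,
               (status & STAT_ZERO) ? " (bogus: reserved bits set)" : "");
}

int main(void)
{
        decode_status(0xa000);  /* a command completed and the CU went idle */
        decode_status(0xffff);  /* the "impossible" value rejected below */
        return 0;
}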
- */ - status = lp->scb.status; -#if 0 - if (i596_debug) { - printk("%s: i596 interrupt, ", dev->name); - i596_out_status(status); - } -#endif - /* Impossible, but it happens - perhaps when we get - a receive interrupt but scb.pa_rfd is I596_NULL. */ - if (status == 0xffff) { - printk("%s: i596_interrupt: got status 0xffff\n", dev->name); - goto out; - } - - ack_cmd = (status & STAT_ACK); - - if (status & (STAT_CX | STAT_CNA)) - i596_handle_CU_completion(dev, lp, status, &ack_cmd); - - if (status & (STAT_FR | STAT_RNR)) { - /* Restart the receive unit when it got inactive somehow */ - if ((status & STAT_RNR) && netif_running(dev)) - ack_cmd |= RX_START; - - if (status & STAT_FR) { - frames_in = i596_rx(dev); - if (!frames_in) - printk("receive frame reported, but no frames\n"); - } - } - - /* acknowledge the interrupt */ - /* - if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev)) - ack_cmd |= CUC_START; - */ - - if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100)) - ; - - lp->scb.command = ack_cmd; - - CLEAR_INT(); - CA(); - - out: - return IRQ_HANDLED; -} - -static int i596_close(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - - netif_stop_queue(dev); - - if (i596_debug) - printk("%s: Shutting down ethercard, status was %4.4x.\n", - dev->name, lp->scb.status); - - lp->scb.command = (CUC_ABORT | RX_ABORT); - CA(); - - i596_cleanup_cmd(dev); - - if (lp->scb.command && i596_timeout(dev, "i596_close", 200)) - ; - - free_irq(dev->irq, dev); - remove_rx_bufs(dev); - - return 0; -} - -/* -* Set or clear the multicast filter for this adaptor. -*/ - -static void set_multicast_list(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - struct i596_cmd *cmd; - - if (i596_debug > 1) - printk ("%s: set multicast list %d\n", - dev->name, netdev_mc_count(dev)); - - if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - char *cp; - cmd = kmalloc(sizeof(struct i596_cmd) + 2 + - netdev_mc_count(dev) * 6, GFP_ATOMIC); - if (cmd == NULL) { - printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name); - return; - } - cmd->command = CmdMulticastList; - *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6; - cp = ((char *)(cmd + 1))+2; - netdev_for_each_mc_addr(ha, dev) { - memcpy(cp, ha->addr, 6); - cp += 6; - } - if (i596_debug & LOG_SRCDST) - print_eth (((char *)(cmd + 1)) + 2); - i596_add_cmd(dev, cmd); - } else { - if (lp->set_conf.pa_next != I596_NULL) { - return; - } - if (netdev_mc_empty(dev) && - !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { - lp->i596_config[8] &= ~0x01; - } else { - lp->i596_config[8] |= 0x01; - } - - i596_add_cmd(dev, &lp->set_conf); - } -} - -MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>"); -MODULE_DESCRIPTION("Intel Panther onboard i82596 driver"); -MODULE_LICENSE("GPL"); - -static struct net_device *dev_lp486e; -static int full_duplex; -static int options; -static int io = IOADDR; -static int irq = IRQ; - -module_param(debug, int, 0); -//module_param(max_interrupt_work, int, 0); -//module_param(reverse_probe, int, 0); -//module_param(rx_copybreak, int, 0); -module_param(options, int, 0); -module_param(full_duplex, int, 0); - -static int __init lp486e_init_module(void) { - int err; - struct net_device *dev = alloc_etherdev(sizeof(struct i596_private)); - if (!dev) - return -ENOMEM; - - dev->irq = irq; - dev->base_addr = io; - err = lp486e_probe(dev); - if (err) { - free_netdev(dev); - return err; - } - err = register_netdev(dev); - if (err) { - release_region(dev->base_addr, 
LP486E_TOTAL_SIZE); - free_netdev(dev); - return err; - } - dev_lp486e = dev; - full_duplex = 0; - options = 0; - return 0; -} - -static void __exit lp486e_cleanup_module(void) { - unregister_netdev(dev_lp486e); - release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE); - free_netdev(dev_lp486e); -} - -module_init(lp486e_init_module); -module_exit(lp486e_cleanup_module); diff --git a/drivers/net/ethernet/i825xx/ni52.c b/drivers/net/ethernet/i825xx/ni52.c deleted file mode 100644 index 272976e1bb0f..000000000000 --- a/drivers/net/ethernet/i825xx/ni52.c +++ /dev/null @@ -1,1346 +0,0 @@ -/* - * net-3-driver for the NI5210 card (i82586 Ethernet chip) - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers that work. - * - * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later) - * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de) - * [feel free to mail ....] - * - * when using as module: (no autoprobing!) - * run with e.g: - * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000 - * - * CAN YOU PLEASE REPORT ME YOUR PERFORMANCE EXPERIENCES !!. - * - * If you find a bug, please report me: - * The kernel panic output and any kmsg from the ni52 driver - * the ni5210-driver-version and the linux-kernel version - * how many shared memory (memsize) on the netcard, - * bootprom: yes/no, base_addr, mem_start - * maybe the ni5210-card revision and the i82586 version - * - * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340 - * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000, - * 0xd8000,0xcc000,0xce000,0xda000,0xdc000 - * - * sources: - * skeleton.c from Donald Becker - * - * I have also done a look in the following sources: (mail me if you need them) - * crynwr-packet-driver by Russ Nelson - * Garret A. Wollman's (fourth) i82586-driver for BSD - * (before getting an i82596 (yes 596 not 586) manual, the existing drivers - * helped me a lot to understand this tricky chip.) - * - * Known Problems: - * The internal sysbus seems to be slow. So we often lose packets because of - * overruns while receiving from a fast remote host. - * This can slow down TCP connections. Maybe the newer ni5210 cards are - * better. My experience is, that if a machine sends with more than about - * 500-600K/s the fifo/sysbus overflows. - * - * IMPORTANT NOTE: - * On fast networks, it's a (very) good idea to have 16K shared memory. With - * 8K, we can store only 4 receive frames, so it can (easily) happen that a - * remote machine 'overruns' our system. - * - * Known i82586/card problems (I'm sure, there are many more!): - * Running the NOP-mode, the i82586 sometimes seems to forget to report - * every xmit-interrupt until we restart the CU. - * Another MAJOR bug is, that the RU sometimes seems to ignore the EL-Bit - * in the RBD-Struct which indicates an end of the RBD queue. - * Instead, the RU fetches another (randomly selected and - * usually used) RBD and begins to fill it. (Maybe, this happens only if - * the last buffer from the previous RFD fits exact into the queue and - * the next RFD can't fetch an initial RBD. Anyone knows more? 
) - * - * results from ftp performance tests with Linux 1.2.5 - * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s) - * sending in NOP-mode: peak performance up to 530K/s (but better don't - * run this mode) - */ - -/* - * 29.Sept.96: virt_to_bus changes for new memory scheme - * 19.Feb.96: more Mcast changes, module support (MH) - * - * 18.Nov.95: Mcast changes (AC). - * - * 23.April.95: fixed(?) receiving problems by configuring a RFD more - * than the number of RBD's. Can maybe cause other problems. - * 18.April.95: Added MODULE support (MH) - * 17.April.95: MC related changes in init586() and set_multicast_list(). - * removed use of 'jiffies' in init586() (MH) - * - * 19.Sep.94: Added Multicast support (not tested yet) (MH) - * - * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling. - * Now, every RFD has exact one RBD. (MH) - * - * 14.Sep.94: added promiscuous mode, a few cleanups (MH) - * - * 19.Aug.94: changed request_irq() parameter (MH) - * - * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH) - * - * 19.July.94: lotsa cleanups .. (MH) - * - * 17.July.94: some patches ... verified to run with 1.1.29 (MH) - * - * 4.July.94: patches for Linux 1.1.24 (MH) - * - * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH) - * - * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, - * too (MH) - * - * < 30.Sep.93: first versions - */ - -static int debuglevel; /* debug-printk 0: off 1: a few 2: more */ -static int automatic_resume; /* experimental .. better should be zero */ -static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */ -static int fifo = 0x8; /* don't change */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/bitops.h> -#include <asm/io.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> - -#include "ni52.h" - -#define DRV_NAME "ni52" - -#define DEBUG /* debug on */ -#define SYSBUSVAL 1 /* 8 Bit */ - -#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); } -#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); } -#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); } -#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); } - -#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16))) -#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base -#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\ - - p->memtop)) - -/******************* how to calculate the buffers ***************************** - - * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works - * --------------- in a different (more stable?) mode. 
Only in this mode it's - * possible to configure the driver with 'NO_NOPCOMMANDS' - -sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; -sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT -sizeof(rfd) = 24; sizeof(rbd) = 12; -sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; -sizeof(nop_cmd) = 8; - - * if you don't know the driver, better do not change these values: */ - -#define RECV_BUFF_SIZE 1524 /* slightly oversized */ -#define XMIT_BUFF_SIZE 1524 /* slightly oversized */ -#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */ -#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */ -#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */ -#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ - -/**************************************************************************/ - - -#define NI52_TOTAL_SIZE 16 -#define NI52_ADDR0 0x02 -#define NI52_ADDR1 0x07 -#define NI52_ADDR2 0x01 - -static int ni52_probe1(struct net_device *dev, int ioaddr); -static irqreturn_t ni52_interrupt(int irq, void *dev_id); -static int ni52_open(struct net_device *dev); -static int ni52_close(struct net_device *dev); -static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *); -static struct net_device_stats *ni52_get_stats(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void ni52_timeout(struct net_device *dev); - -/* helper-functions */ -static int init586(struct net_device *dev); -static int check586(struct net_device *dev, unsigned size); -static void alloc586(struct net_device *dev); -static void startrecv586(struct net_device *dev); -static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr); -static void ni52_rcv_int(struct net_device *dev); -static void ni52_xmt_int(struct net_device *dev); -static void ni52_rnr_int(struct net_device *dev); - -struct priv { - char __iomem *base; - char __iomem *mapped; - char __iomem *memtop; - spinlock_t spinlock; - int reset; - struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first; - struct scp_struct __iomem *scp; - struct iscp_struct __iomem *iscp; - struct scb_struct __iomem *scb; - struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS]; -#if (NUM_XMIT_BUFFS == 1) - struct transmit_cmd_struct __iomem *xmit_cmds[2]; - struct nop_cmd_struct __iomem *nop_cmds[2]; -#else - struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS]; - struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS]; -#endif - int nop_point, num_recv_buffs; - char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS]; - int xmit_count, xmit_last; -}; - -/* wait for command with timeout: */ -static void wait_for_scb_cmd(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - int i; - for (i = 0; i < 16384; i++) { - if (readb(&p->scb->cmd_cuc) == 0) - break; - udelay(4); - if (i == 16383) { - printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n", - dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus)); - if (!p->reset) { - p->reset = 1; - ni_reset586(); - } - } - } -} - -static void wait_for_scb_cmd_ruc(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - int i; - for (i = 0; i < 16384; i++) { - if (readb(&p->scb->cmd_ruc) == 0) - break; - udelay(4); - if (i == 16383) { - printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. 
disabling i82586!!\n", - dev->name, readb(&p->scb->cmd_ruc), - readb(&p->scb->rus)); - if (!p->reset) { - p->reset = 1; - ni_reset586(); - } - } - } -} - -static void wait_for_stat_compl(void __iomem *p) -{ - struct nop_cmd_struct __iomem *addr = p; - int i; - for (i = 0; i < 32767; i++) { - if (readw(&((addr)->cmd_status)) & STAT_COMPL) - break; - udelay(32); - } -} - -/********************************************** - * close device - */ -static int ni52_close(struct net_device *dev) -{ - free_irq(dev->irq, dev); - ni_reset586(); /* the hard way to stop the receiver */ - netif_stop_queue(dev); - return 0; -} - -/********************************************** - * open device - */ -static int ni52_open(struct net_device *dev) -{ - int ret; - - ni_disint(); - alloc586(dev); - init586(dev); - startrecv586(dev); - ni_enaint(); - - ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev); - if (ret) { - ni_reset586(); - return ret; - } - netif_start_queue(dev); - return 0; /* most done by init */ -} - -static int check_iscp(struct net_device *dev, void __iomem *addr) -{ - struct iscp_struct __iomem *iscp = addr; - struct priv *p = netdev_priv(dev); - memset_io(iscp, 0, sizeof(struct iscp_struct)); - - writel(make24(iscp), &p->scp->iscp); - writeb(1, &iscp->busy); - - ni_reset586(); - ni_attn586(); - mdelay(32); /* wait a while... */ - /* i82586 clears 'busy' after successful init */ - if (readb(&iscp->busy)) - return 0; - return 1; -} - -/********************************************** - * Check to see if there's an 82586 out there. - */ -static int check586(struct net_device *dev, unsigned size) -{ - struct priv *p = netdev_priv(dev); - int i; - - p->mapped = ioremap(dev->mem_start, size); - if (!p->mapped) - return 0; - - p->base = p->mapped + size - 0x01000000; - p->memtop = p->mapped + size; - p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS); - p->scb = (struct scb_struct __iomem *) p->mapped; - p->iscp = (struct iscp_struct __iomem *)p->scp - 1; - memset_io(p->scp, 0, sizeof(struct scp_struct)); - for (i = 0; i < sizeof(struct scp_struct); i++) - /* memory was writeable? */ - if (readb((char __iomem *)p->scp + i)) - goto Enodev; - writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */ - if (readb(&p->scp->sysbus) != SYSBUSVAL) - goto Enodev; - - if (!check_iscp(dev, p->mapped)) - goto Enodev; - if (!check_iscp(dev, p->iscp)) - goto Enodev; - return 1; -Enodev: - iounmap(p->mapped); - return 0; -} - -/****************************************************************** - * set iscp at the right place, called by ni52_probe1 and open586. 
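/*
 * Editorial sketch (not part of the original driver): a back-of-the-envelope
 * check of why NUM_RECV_BUFFS_8 and NUM_RECV_BUFFS_16 are 4 and 9.  It uses
 * the structure sizes quoted in the comment block near the top of the file
 * and assumes the default layout (NUM_XMIT_BUFFS = 1, rfdadd = 0, SCB at the
 * bottom of the window, ISCP + SCP at the top).
 */
#include <stdio.h>

int main(void)
{
	const int scp = 12, iscp = 8, scb = 16;		/* system blocks      */
	const int rfd = 24, rbd = 12, tbd = 8;		/* descriptors        */
	const int nop = 8, xmit = 16, buf = 1524;	/* commands, buffers  */
	/* space needed regardless of the size of the receive ring:          */
	const int fixed = scp + iscp + scb + 2 * nop + xmit + tbd + buf;
	const int per_recv = rfd + rbd + buf;		/* one receive slot   */
	int mem;

	for (mem = 0x2000; mem <= 0x4000; mem += 0x2000)
		printf("%2dK shared memory -> %d receive buffers\n",
		       mem >> 10, (mem - fixed) / per_recv);
	/* prints 4 for the 8K window and 9 for the 16K window, matching the
	 * defines above */
	return 0;
}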
- */ -static void alloc586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - ni_reset586(); - mdelay(32); - - memset_io(p->iscp, 0, sizeof(struct iscp_struct)); - memset_io(p->scp , 0, sizeof(struct scp_struct)); - - writel(make24(p->iscp), &p->scp->iscp); - writeb(SYSBUSVAL, &p->scp->sysbus); - writew(make16(p->scb), &p->iscp->scb_offset); - - writeb(1, &p->iscp->busy); - ni_reset586(); - ni_attn586(); - - mdelay(32); - - if (readb(&p->iscp->busy)) - printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name); - - p->reset = 0; - - memset_io(p->scb, 0, sizeof(struct scb_struct)); -} - -/* set: io,irq,memstart,memend or set it when calling insmod */ -static int irq = 9; -static int io = 0x300; -static long memstart; /* e.g 0xd0000 */ -static long memend; /* e.g 0xd4000 */ - -/********************************************** - * probe the ni5210-card - */ -struct net_device * __init ni52_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct priv)); - static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0}; - const int *port; - struct priv *p; - int err = 0; - - if (!dev) - return ERR_PTR(-ENOMEM); - - p = netdev_priv(dev); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - memstart = dev->mem_start; - memend = dev->mem_end; - } - - if (io > 0x1ff) { /* Check a single specified location. */ - err = ni52_probe1(dev, io); - } else if (io > 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { - for (port = ports; *port && ni52_probe1(dev, *port) ; port++) - ; - if (*port) - goto got_it; -#ifdef FULL_IO_PROBE - for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8) - ; - if (io < 0x400) - goto got_it; -#endif - err = -ENODEV; - } - if (err) - goto out; -got_it: - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - iounmap(p->mapped); - release_region(dev->base_addr, NI52_TOTAL_SIZE); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops ni52_netdev_ops = { - .ndo_open = ni52_open, - .ndo_stop = ni52_close, - .ndo_get_stats = ni52_get_stats, - .ndo_tx_timeout = ni52_timeout, - .ndo_start_xmit = ni52_send_packet, - .ndo_set_rx_mode = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init ni52_probe1(struct net_device *dev, int ioaddr) -{ - int i, size, retval; - struct priv *priv = netdev_priv(dev); - - dev->base_addr = ioaddr; - dev->irq = irq; - dev->mem_start = memstart; - dev->mem_end = memend; - - spin_lock_init(&priv->spinlock); - - if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) - return -EBUSY; - - if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) || - !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) { - retval = -ENODEV; - goto out; - } - - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = inb(dev->base_addr+i); - - if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 || - dev->dev_addr[2] != NI52_ADDR2) { - retval = -ENODEV; - goto out; - } - - printk(KERN_INFO "%s: NI5210 found at %#3lx, ", - dev->name, dev->base_addr); - - /* - * check (or search) IO-Memory, 8K and 16K - */ -#ifdef MODULE - size = dev->mem_end - dev->mem_start; - if (size != 0x2000 && size != 0x4000) { - printk("\n"); - printk(KERN_ERR "%s: Invalid memory size %d. 
Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size); - retval = -ENODEV; - goto out; - } - if (!check586(dev, size)) { - printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size); - retval = -ENODEV; - goto out; - } -#else - if (dev->mem_start != 0) { - /* no auto-mem-probe */ - size = 0x4000; /* check for 16K mem */ - if (!check586(dev, size)) { - size = 0x2000; /* check for 8K mem */ - if (!check586(dev, size)) { - printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start); - retval = -ENODEV; - goto out; - } - } - } else { - static const unsigned long memaddrs[] = { - 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000, - 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0 - }; - for (i = 0;; i++) { - if (!memaddrs[i]) { - printk(KERN_ERR "?memprobe, Can't find io-memory!\n"); - retval = -ENODEV; - goto out; - } - dev->mem_start = memaddrs[i]; - size = 0x2000; /* check for 8K mem */ - if (check586(dev, size)) - /* 8K-check */ - break; - size = 0x4000; /* check for 16K mem */ - if (check586(dev, size)) - /* 16K-check */ - break; - } - } - /* set mem_end showed by 'ifconfig' */ - dev->mem_end = dev->mem_start + size; -#endif - - alloc586(dev); - - /* set number of receive-buffs according to memsize */ - if (size == 0x2000) - priv->num_recv_buffs = NUM_RECV_BUFFS_8; - else - priv->num_recv_buffs = NUM_RECV_BUFFS_16; - - printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ", - dev->mem_start, size); - - if (dev->irq < 2) { - unsigned long irq_mask; - - irq_mask = probe_irq_on(); - ni_reset586(); - ni_attn586(); - - mdelay(20); - dev->irq = probe_irq_off(irq_mask); - if (!dev->irq) { - printk("?autoirq, Failed to detect IRQ line!\n"); - retval = -EAGAIN; - iounmap(priv->mapped); - goto out; - } - printk("IRQ %d (autodetected).\n", dev->irq); - } else { - if (dev->irq == 2) - dev->irq = 9; - printk("IRQ %d (assigned and not checked!).\n", dev->irq); - } - - dev->netdev_ops = &ni52_netdev_ops; - dev->watchdog_timeo = HZ/20; - - return 0; -out: - release_region(ioaddr, NI52_TOTAL_SIZE); - return retval; -} - -/********************************************** - * init the chip (ni52-interrupt should be disabled?!) - * needs a correct 'allocated' memory - */ - -static int init586(struct net_device *dev) -{ - void __iomem *ptr; - int i, result = 0; - struct priv *p = netdev_priv(dev); - struct configure_cmd_struct __iomem *cfg_cmd; - struct iasetup_cmd_struct __iomem *ias_cmd; - struct tdr_cmd_struct __iomem *tdr_cmd; - struct mcsetup_cmd_struct __iomem *mc_cmd; - struct netdev_hw_addr *ha; - int num_addrs = netdev_mc_count(dev); - - ptr = p->scb + 1; - - cfg_cmd = ptr; /* configure-command */ - writew(0, &cfg_cmd->cmd_status); - writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd); - writew(0xFFFF, &cfg_cmd->cmd_link); - - /* number of cfg bytes */ - writeb(0x0a, &cfg_cmd->byte_cnt); - /* fifo-limit (8=tx:32/rx:64) */ - writeb(fifo, &cfg_cmd->fifo); - /* hold or discard bad recv frames (bit 7) */ - writeb(0x40, &cfg_cmd->sav_bf); - /* addr_len |!src_insert |pre-len |loopback */ - writeb(0x2e, &cfg_cmd->adr_len); - writeb(0x00, &cfg_cmd->priority); - writeb(0x60, &cfg_cmd->ifs); - writeb(0x00, &cfg_cmd->time_low); - writeb(0xf2, &cfg_cmd->time_high); - writeb(0x00, &cfg_cmd->promisc); - if (dev->flags & IFF_ALLMULTI) { - int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6; - if (num_addrs > len) { - printk(KERN_ERR "%s: switching to promisc. 
mode\n", - dev->name); - writeb(0x01, &cfg_cmd->promisc); - } - } - if (dev->flags & IFF_PROMISC) - writeb(0x01, &cfg_cmd->promisc); - writeb(0x00, &cfg_cmd->carr_coll); - writew(make16(cfg_cmd), &p->scb->cbl_offset); - writeb(0, &p->scb->cmd_ruc); - - writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ - ni_attn586(); - - wait_for_stat_compl(cfg_cmd); - - if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != - (STAT_COMPL|STAT_OK)) { - printk(KERN_ERR "%s: configure command failed: %x\n", - dev->name, readw(&cfg_cmd->cmd_status)); - return 1; - } - - /* - * individual address setup - */ - - ias_cmd = ptr; - - writew(0, &ias_cmd->cmd_status); - writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd); - writew(0xffff, &ias_cmd->cmd_link); - - memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN); - - writew(make16(ias_cmd), &p->scb->cbl_offset); - - writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ - ni_attn586(); - - wait_for_stat_compl(ias_cmd); - - if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != - (STAT_OK|STAT_COMPL)) { - printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status)); - return 1; - } - - /* - * TDR, wire check .. e.g. no resistor e.t.c - */ - - tdr_cmd = ptr; - - writew(0, &tdr_cmd->cmd_status); - writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd); - writew(0xffff, &tdr_cmd->cmd_link); - writew(0, &tdr_cmd->status); - - writew(make16(tdr_cmd), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ - ni_attn586(); - - wait_for_stat_compl(tdr_cmd); - - if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL)) - printk(KERN_ERR "%s: Problems while running the TDR.\n", - dev->name); - else { - udelay(16); - result = readw(&tdr_cmd->status); - writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); - ni_attn586(); /* ack the interrupts */ - - if (result & TDR_LNK_OK) - ; - else if (result & TDR_XCVR_PRB) - printk(KERN_ERR "%s: TDR: Transceiver problem. 
Check the cable(s)!\n", - dev->name); - else if (result & TDR_ET_OPN) - printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n", - dev->name, result & TDR_TIMEMASK); - else if (result & TDR_ET_SRT) { - /* time == 0 -> strange :-) */ - if (result & TDR_TIMEMASK) - printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n", - dev->name, result & TDR_TIMEMASK); - } else - printk(KERN_ERR "%s: TDR: Unknown status %04x\n", - dev->name, result); - } - - /* - * Multicast setup - */ - if (num_addrs && !(dev->flags & IFF_PROMISC)) { - mc_cmd = ptr; - writew(0, &mc_cmd->cmd_status); - writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd); - writew(0xffff, &mc_cmd->cmd_link); - writew(num_addrs * 6, &mc_cmd->mc_cnt); - - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6); - - writew(make16(mc_cmd), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - ni_attn586(); - - wait_for_stat_compl(mc_cmd); - - if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) - != (STAT_COMPL|STAT_OK)) - printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name); - } - - /* - * alloc nop/xmit-cmds - */ -#if (NUM_XMIT_BUFFS == 1) - for (i = 0; i < 2; i++) { - p->nop_cmds[i] = ptr; - writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); - writew(0, &p->nop_cmds[i]->cmd_status); - writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); - ptr = ptr + sizeof(struct nop_cmd_struct); - } -#else - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - p->nop_cmds[i] = ptr; - writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); - writew(0, &p->nop_cmds[i]->cmd_status); - writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); - ptr = ptr + sizeof(struct nop_cmd_struct); - } -#endif - - ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */ - - /* - * alloc xmit-buffs / init xmit_cmds - */ - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - /* Transmit cmd/buff 0 */ - p->xmit_cmds[i] = ptr; - ptr = ptr + sizeof(struct transmit_cmd_struct); - p->xmit_cbuffs[i] = ptr; /* char-buffs */ - ptr = ptr + XMIT_BUFF_SIZE; - p->xmit_buffs[i] = ptr; /* TBD */ - ptr = ptr + sizeof(struct tbd_struct); - if ((void __iomem *)ptr > (void __iomem *)p->iscp) { - printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", - dev->name); - return 1; - } - memset_io(p->xmit_cmds[i], 0, - sizeof(struct transmit_cmd_struct)); - memset_io(p->xmit_buffs[i], 0, - sizeof(struct tbd_struct)); - writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]), - &p->xmit_cmds[i]->cmd_link); - writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status); - writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd); - writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset); - writew(0xffff, &p->xmit_buffs[i]->next); - writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer); - } - - p->xmit_count = 0; - p->xmit_last = 0; -#ifndef NO_NOPCOMMANDS - p->nop_point = 0; -#endif - - /* - * 'start transmitter' - */ -#ifndef NO_NOPCOMMANDS - writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - ni_attn586(); - wait_for_scb_cmd(dev); -#else - writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link); - writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd); -#endif - - /* - * ack. interrupts - */ - writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); - ni_attn586(); - udelay(16); - - ni_enaint(); - - return 0; -} - -/****************************************************** - * This is a helper routine for ni52_rnr_int() and init586(). - * It sets up the Receive Frame Area (RFA). 
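/*
 * Editorial sketch (not part of the original driver): the submit-and-wait
 * sequence that init586() above repeats for its CONFIGURE, IA-SETUP, TDR and
 * MC-SETUP blocks, folded into one hypothetical helper.  It assumes 'cmd'
 * starts with the cmd_status/cmd_cmd/cmd_link header that all i82586 action
 * commands share.
 */
static int ni52_run_cmd(struct net_device *dev, void __iomem *cmd, u16 opcode)
{
	struct priv *p = netdev_priv(dev);
	struct nop_cmd_struct __iomem *hdr = cmd;	/* common header part */

	writew(0, &hdr->cmd_status);			/* not complete yet   */
	writew(opcode | CMD_LAST, &hdr->cmd_cmd);	/* one-command list   */
	writew(0xffff, &hdr->cmd_link);			/* no successor       */

	writew(make16(hdr), &p->scb->cbl_offset);	/* hand it to the CU  */
	writeb(CUC_START, &p->scb->cmd_cuc);
	ni_attn586();					/* channel attention  */
	wait_for_stat_compl(hdr);			/* poll STAT_COMPL    */

	return (readw(&hdr->cmd_status) & (STAT_COMPL | STAT_OK)) ==
	       (STAT_COMPL | STAT_OK) ? 0 : -EIO;
}
/* e.g. the configure step above could then read:
 *	if (ni52_run_cmd(dev, cfg_cmd, CMD_CONFIGURE)) ...error... */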
- */ - -static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr) -{ - struct rfd_struct __iomem *rfd = ptr; - struct rbd_struct __iomem *rbd; - int i; - struct priv *p = netdev_priv(dev); - - memset_io(rfd, 0, - sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd)); - p->rfd_first = rfd; - - for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) { - writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)), - &rfd[i].next); - writew(0xffff, &rfd[i].rbd_offset); - } - /* RU suspend */ - writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last); - - ptr = rfd + (p->num_recv_buffs + rfdadd); - - rbd = ptr; - ptr = rbd + p->num_recv_buffs; - - /* clr descriptors */ - memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs)); - - for (i = 0; i < p->num_recv_buffs; i++) { - writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next); - writew(RECV_BUFF_SIZE, &rbd[i].size); - writel(make24(ptr), &rbd[i].buffer); - ptr = ptr + RECV_BUFF_SIZE; - } - p->rfd_top = p->rfd_first; - p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); - - writew(make16(p->rfd_first), &p->scb->rfa_offset); - writew(make16(rbd), &p->rfd_first->rbd_offset); - - return ptr; -} - - -/************************************************** - * Interrupt Handler ... - */ - -static irqreturn_t ni52_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - unsigned int stat; - int cnt = 0; - struct priv *p; - - p = netdev_priv(dev); - - if (debuglevel > 1) - printk("I"); - - spin_lock(&p->spinlock); - - wait_for_scb_cmd(dev); /* wait for last command */ - - while ((stat = readb(&p->scb->cus) & STAT_MASK)) { - writeb(stat, &p->scb->cmd_cuc); - ni_attn586(); - - if (stat & STAT_FR) /* received a frame */ - ni52_rcv_int(dev); - - if (stat & STAT_RNR) { /* RU went 'not ready' */ - printk("(R)"); - if (readb(&p->scb->rus) & RU_SUSPEND) { - /* special case: RU_SUSPEND */ - wait_for_scb_cmd(dev); - writeb(RUC_RESUME, &p->scb->cmd_ruc); - ni_attn586(); - wait_for_scb_cmd_ruc(dev); - } else { - printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n", - dev->name, stat, readb(&p->scb->rus)); - ni52_rnr_int(dev); - } - } - - /* Command with I-bit set complete */ - if (stat & STAT_CX) - ni52_xmt_int(dev); - -#ifndef NO_NOPCOMMANDS - if (stat & STAT_CNA) { /* CU went 'not ready' */ - if (netif_running(dev)) - printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n", - dev->name, stat, readb(&p->scb->cus)); - } -#endif - - if (debuglevel > 1) - printk("%d", cnt++); - - /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */ - wait_for_scb_cmd(dev); - if (readb(&p->scb->cmd_cuc)) { /* timed out? */ - printk(KERN_ERR "%s: Acknowledge timed out.\n", - dev->name); - ni_disint(); - break; - } - } - spin_unlock(&p->spinlock); - - if (debuglevel > 1) - printk("i"); - return IRQ_HANDLED; -} - -/******************************************************* - * receive-interrupt - */ - -static void ni52_rcv_int(struct net_device *dev) -{ - int status, cnt = 0; - unsigned short totlen; - struct sk_buff *skb; - struct rbd_struct __iomem *rbd; - struct priv *p = netdev_priv(dev); - - if (debuglevel > 0) - printk("R"); - - for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) { - rbd = make32(readw(&p->rfd_top->rbd_offset)); - if (status & RFD_OK) { /* frame received without error? */ - totlen = readw(&rbd->status); - if (totlen & RBD_LAST) { - /* the first and the last buffer? 
*/ - totlen &= RBD_MASK; /* length of this frame */ - writew(0x00, &rbd->status); - skb = netdev_alloc_skb(dev, totlen + 2); - if (skb != NULL) { - skb_reserve(skb, 2); - skb_put(skb, totlen); - memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += totlen; - } else - dev->stats.rx_dropped++; - } else { - int rstat; - /* free all RBD's until RBD_LAST is set */ - totlen = 0; - while (!((rstat = readw(&rbd->status)) & RBD_LAST)) { - totlen += rstat & RBD_MASK; - if (!rstat) { - printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name); - break; - } - writew(0, &rbd->status); - rbd = make32(readw(&rbd->next)); - } - totlen += rstat & RBD_MASK; - writew(0, &rbd->status); - printk(KERN_ERR "%s: received oversized frame! length: %d\n", - dev->name, totlen); - dev->stats.rx_dropped++; - } - } else {/* frame !(ok), only with 'save-bad-frames' */ - printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n", - dev->name, status); - dev->stats.rx_errors++; - } - writeb(0, &p->rfd_top->stat_high); - writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */ - writew(0xffff, &p->rfd_top->rbd_offset); - writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */ - p->rfd_last = p->rfd_top; - p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */ - writew(make16(p->rfd_top), &p->scb->rfa_offset); - - if (debuglevel > 0) - printk("%d", cnt++); - } - - if (automatic_resume) { - wait_for_scb_cmd(dev); - writeb(RUC_RESUME, &p->scb->cmd_ruc); - ni_attn586(); - wait_for_scb_cmd_ruc(dev); - } - -#ifdef WAIT_4_BUSY - { - int i; - for (i = 0; i < 1024; i++) { - if (p->rfd_top->status) - break; - udelay(16); - if (i == 1023) - printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name); - } - } -#endif - if (debuglevel > 0) - printk("r"); -} - -/********************************************************** - * handle 'Receiver went not ready'. - */ - -static void ni52_rnr_int(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - dev->stats.rx_errors++; - - wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ - writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */ - ni_attn586(); - wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */ - - alloc_rfa(dev, p->rfd_first); - /* maybe add a check here, before restarting the RU */ - startrecv586(dev); /* restart RU */ - - printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n", - dev->name, readb(&p->scb->rus)); - -} - -/********************************************************** - * handle xmit - interrupt - */ - -static void ni52_xmt_int(struct net_device *dev) -{ - int status; - struct priv *p = netdev_priv(dev); - - if (debuglevel > 0) - printk("X"); - - status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status); - if (!(status & STAT_COMPL)) - printk(KERN_ERR "%s: strange .. 
xmit-int without a 'COMPLETE'\n", dev->name); - - if (status & STAT_OK) { - dev->stats.tx_packets++; - dev->stats.collisions += (status & TCMD_MAXCOLLMASK); - } else { - dev->stats.tx_errors++; - if (status & TCMD_LATECOLL) { - printk(KERN_ERR "%s: late collision detected.\n", - dev->name); - dev->stats.collisions++; - } else if (status & TCMD_NOCARRIER) { - dev->stats.tx_carrier_errors++; - printk(KERN_ERR "%s: no carrier detected.\n", - dev->name); - } else if (status & TCMD_LOSTCTS) - printk(KERN_ERR "%s: loss of CTS detected.\n", - dev->name); - else if (status & TCMD_UNDERRUN) { - dev->stats.tx_fifo_errors++; - printk(KERN_ERR "%s: DMA underrun detected.\n", - dev->name); - } else if (status & TCMD_MAXCOLL) { - printk(KERN_ERR "%s: Max. collisions exceeded.\n", - dev->name); - dev->stats.collisions += 16; - } - } -#if (NUM_XMIT_BUFFS > 1) - if ((++p->xmit_last) == NUM_XMIT_BUFFS) - p->xmit_last = 0; -#endif - netif_wake_queue(dev); -} - -/*********************************************************** - * (re)start the receiver - */ - -static void startrecv586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - wait_for_scb_cmd(dev); - wait_for_scb_cmd_ruc(dev); - writew(make16(p->rfd_first), &p->scb->rfa_offset); - writeb(RUC_START, &p->scb->cmd_ruc); - ni_attn586(); /* start cmd. */ - wait_for_scb_cmd_ruc(dev); - /* wait for accept cmd. (no timeout!!) */ -} - -static void ni52_timeout(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); -#ifndef NO_NOPCOMMANDS - if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */ - netif_wake_queue(dev); -#ifdef DEBUG - printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n", - dev->name); - printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n", - dev->name, (int)p->xmit_cmds[0]->cmd_status, - readw(&p->nop_cmds[0]->cmd_status), - readw(&p->nop_cmds[1]->cmd_status), - p->nop_point); -#endif - writeb(CUC_ABORT, &p->scb->cmd_cuc); - ni_attn586(); - wait_for_scb_cmd(dev); - writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - ni_attn586(); - wait_for_scb_cmd(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - return 0; - } -#endif - { -#ifdef DEBUG - printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n", - dev->name, readb(&p->scb->cus)); - printk(KERN_ERR "%s: command-stats: %04x %04x\n", - dev->name, - readw(&p->xmit_cmds[0]->cmd_status), - readw(&p->xmit_cmds[1]->cmd_status)); - printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n", - dev->name); -#endif - ni52_close(dev); - ni52_open(dev); - } - dev->trans_start = jiffies; /* prevent tx timeout */ -} - -/****************************************************** - * send frame - */ - -static netdev_tx_t ni52_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - int len, i; -#ifndef NO_NOPCOMMANDS - int next_nop; -#endif - struct priv *p = netdev_priv(dev); - - if (skb->len > XMIT_BUFF_SIZE) { - printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len); - return NETDEV_TX_OK; - } - - netif_stop_queue(dev); - - memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len); - len = skb->len; - if (len < ETH_ZLEN) { - len = ETH_ZLEN; - memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0, - len - skb->len); - } - -#if (NUM_XMIT_BUFFS == 1) -# ifdef NO_NOPCOMMANDS - -#ifdef DEBUG - if (readb(&p->scb->cus) & CU_ACTIVE) { - printk(KERN_ERR "%s: Hmmm .. 
CU is still running and we wanna send a new packet.\n", dev->name); - printk(KERN_ERR "%s: stat: %04x %04x\n", - dev->name, readb(&p->scb->cus), - readw(&p->xmit_cmds[0]->cmd_status)); - } -#endif - writew(TBD_LAST | len, &p->xmit_buffs[0]->size); - for (i = 0; i < 16; i++) { - writew(0, &p->xmit_cmds[0]->cmd_status); - wait_for_scb_cmd(dev); - if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND) - writeb(CUC_RESUME, &p->scb->cmd_cuc); - else { - writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - } - ni_attn586(); - if (!i) - dev_kfree_skb(skb); - wait_for_scb_cmd(dev); - /* test it, because CU sometimes doesn't start immediately */ - if (readb(&p->scb->cus) & CU_ACTIVE) - break; - if (readw(&p->xmit_cmds[0]->cmd_status)) - break; - if (i == 15) - printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name); - } -# else - next_nop = (p->nop_point + 1) & 0x1; - writew(TBD_LAST | len, &p->xmit_buffs[0]->size); - writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link); - writew(make16(p->nop_cmds[next_nop]), - &p->nop_cmds[next_nop]->cmd_link); - writew(0, &p->xmit_cmds[0]->cmd_status); - writew(0, &p->nop_cmds[next_nop]->cmd_status); - - writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link); - p->nop_point = next_nop; - dev_kfree_skb(skb); -# endif -#else - writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size); - next_nop = p->xmit_count + 1 - if (next_nop == NUM_XMIT_BUFFS) - next_nop = 0; - writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status); - /* linkpointer of xmit-command already points to next nop cmd */ - writew(make16(p->nop_cmds[next_nop]), - &p->nop_cmds[next_nop]->cmd_link); - writew(0, &p->nop_cmds[next_nop]->cmd_status); - writew(make16(p->xmit_cmds[p->xmit_count]), - &p->nop_cmds[p->xmit_count]->cmd_link); - p->xmit_count = next_nop; - { - unsigned long flags; - spin_lock_irqsave(&p->spinlock); - if (p->xmit_count != p->xmit_last) - netif_wake_queue(dev); - spin_unlock_irqrestore(&p->spinlock); - } - dev_kfree_skb(skb); -#endif - return NETDEV_TX_OK; -} - -/******************************************* - * Someone wanna have the statistics - */ - -static struct net_device_stats *ni52_get_stats(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - unsigned short crc, aln, rsc, ovrn; - - /* Get error-statistics from the ni82586 */ - crc = readw(&p->scb->crc_errs); - writew(0, &p->scb->crc_errs); - aln = readw(&p->scb->aln_errs); - writew(0, &p->scb->aln_errs); - rsc = readw(&p->scb->rsc_errs); - writew(0, &p->scb->rsc_errs); - ovrn = readw(&p->scb->ovrn_errs); - writew(0, &p->scb->ovrn_errs); - - dev->stats.rx_crc_errors += crc; - dev->stats.rx_fifo_errors += ovrn; - dev->stats.rx_frame_errors += aln; - dev->stats.rx_dropped += rsc; - - return &dev->stats; -} - -/******************************************************** - * Set MC list .. 
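/*
 * Editorial sketch (not part of the original driver): the NOP-chain trick
 * used by the transmit path above when NO_NOPCOMMANDS is not defined.  The
 * command unit is kept busy looping over a pair of NOP blocks, and queueing
 * a frame only re-links that loop around the transmit command, so no
 * CUC_START/channel-attention is needed per packet.  Hypothetical helper
 * using the driver's own fields.
 */
static void ni52_splice_xmit(struct priv *p)
{
	int cur  = p->nop_point;	/* NOP the CU is currently spinning on */
	int next = (cur + 1) & 1;	/* the other NOP of the pair           */

	/* the transmit command falls through into the next NOP ...          */
	writew(make16(p->nop_cmds[next]), &p->xmit_cmds[0]->cmd_link);
	/* ... and that NOP loops onto itself until the following frame       */
	writew(make16(p->nop_cmds[next]), &p->nop_cmds[next]->cmd_link);
	writew(0, &p->xmit_cmds[0]->cmd_status);
	writew(0, &p->nop_cmds[next]->cmd_status);
	/* finally steer the currently spinning NOP into the transmit command */
	writew(make16(p->xmit_cmds[0]), &p->nop_cmds[cur]->cmd_link);
	p->nop_point = next;
}
/* the real code additionally fills in the TBD size (TBD_LAST | len) before
 * performing this splice */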
- */ - -static void set_multicast_list(struct net_device *dev) -{ - netif_stop_queue(dev); - ni_disint(); - alloc586(dev); - init586(dev); - startrecv586(dev); - ni_enaint(); - netif_wake_queue(dev); -} - -#ifdef MODULE -static struct net_device *dev_ni52; - -module_param(io, int, 0); -module_param(irq, int, 0); -module_param(memstart, long, 0); -module_param(memend, long, 0); -MODULE_PARM_DESC(io, "NI5210 I/O base address,required"); -MODULE_PARM_DESC(irq, "NI5210 IRQ number,required"); -MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); -MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); - -int __init init_module(void) -{ - if (io <= 0x0 || !memend || !memstart || irq < 2) { - printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n"); - printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); - return -ENODEV; - } - dev_ni52 = ni52_probe(-1); - if (IS_ERR(dev_ni52)) - return PTR_ERR(dev_ni52); - return 0; -} - -void __exit cleanup_module(void) -{ - struct priv *p = netdev_priv(dev_ni52); - unregister_netdev(dev_ni52); - iounmap(p->mapped); - release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE); - free_netdev(dev_ni52); -} -#endif /* MODULE */ - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/ni52.h b/drivers/net/ethernet/i825xx/ni52.h deleted file mode 100644 index 0a03b2883327..000000000000 --- a/drivers/net/ethernet/i825xx/ni52.h +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Intel i82586 Ethernet definitions - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers that work. - * - * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de) - * - * I have done a look in the following sources: - * crynwr-packet-driver by Russ Nelson - * Garret A. 
Wollman's i82586-driver for BSD - */ - - -#define NI52_RESET 0 /* writing to this address, resets the i82586 */ -#define NI52_ATTENTION 1 /* channel attention, kick the 586 */ -#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */ -#define NI52_TDIS 2 /* Xmit disable */ -#define NI52_INTENA 5 /* Interrupt enable */ -#define NI52_INTDIS 4 /* Interrupt disable */ -#define NI52_MAGIC1 6 /* dunno exact function */ -#define NI52_MAGIC2 7 /* dunno exact function */ - -#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */ -#define NI52_MAGICVAL2 0x55 - -/* - * where to find the System Configuration Pointer (SCP) - */ -#define SCP_DEFAULT_ADDRESS 0xfffff4 - - -/* - * System Configuration Pointer Struct - */ - -struct scp_struct -{ - u16 zero_dum0; /* has to be zero */ - u8 sysbus; /* 0=16Bit,1=8Bit */ - u8 zero_dum1; /* has to be zero for 586 */ - u16 zero_dum2; - u16 zero_dum3; - u32 iscp; /* pointer to the iscp-block */ -}; - - -/* - * Intermediate System Configuration Pointer (ISCP) - */ -struct iscp_struct -{ - u8 busy; /* 586 clears after successful init */ - u8 zero_dummy; /* has to be zero */ - u16 scb_offset; /* pointeroffset to the scb_base */ - u32 scb_base; /* base-address of all 16-bit offsets */ -}; - -/* - * System Control Block (SCB) - */ -struct scb_struct -{ - u8 rus; - u8 cus; - u8 cmd_ruc; /* command word: RU part */ - u8 cmd_cuc; /* command word: CU part & ACK */ - u16 cbl_offset; /* pointeroffset, command block list */ - u16 rfa_offset; /* pointeroffset, receive frame area */ - u16 crc_errs; /* CRC-Error counter */ - u16 aln_errs; /* alignmenterror counter */ - u16 rsc_errs; /* Resourceerror counter */ - u16 ovrn_errs; /* OVerrunerror counter */ -}; - -/* - * possible command values for the command word - */ -#define RUC_MASK 0x0070 /* mask for RU commands */ -#define RUC_NOP 0x0000 /* NOP-command */ -#define RUC_START 0x0010 /* start RU */ -#define RUC_RESUME 0x0020 /* resume RU after suspend */ -#define RUC_SUSPEND 0x0030 /* suspend RU */ -#define RUC_ABORT 0x0040 /* abort receiver operation immediately */ - -#define CUC_MASK 0x07 /* mask for CU command */ -#define CUC_NOP 0x00 /* NOP-command */ -#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */ -#define CUC_RESUME 0x02 /* resume after suspend */ -#define CUC_SUSPEND 0x03 /* Suspend CU */ -#define CUC_ABORT 0x04 /* abort command operation immediately */ - -#define ACK_MASK 0xf0 /* mask for ACK command */ -#define ACK_CX 0x80 /* acknowledges STAT_CX */ -#define ACK_FR 0x40 /* ack. STAT_FR */ -#define ACK_CNA 0x20 /* ack. STAT_CNA */ -#define ACK_RNR 0x10 /* ack. 
STAT_RNR */ - -/* - * possible status values for the status word - */ -#define STAT_MASK 0xf0 /* mask for cause of interrupt */ -#define STAT_CX 0x80 /* CU finished cmd with its I bit set */ -#define STAT_FR 0x40 /* RU finished receiving a frame */ -#define STAT_CNA 0x20 /* CU left active state */ -#define STAT_RNR 0x10 /* RU left ready state */ - -#define CU_STATUS 0x7 /* CU status, 0=idle */ -#define CU_SUSPEND 0x1 /* CU is suspended */ -#define CU_ACTIVE 0x2 /* CU is active */ - -#define RU_STATUS 0x70 /* RU status, 0=idle */ -#define RU_SUSPEND 0x10 /* RU suspended */ -#define RU_NOSPACE 0x20 /* RU no resources */ -#define RU_READY 0x40 /* RU is ready */ - -/* - * Receive Frame Descriptor (RFD) - */ -struct rfd_struct -{ - u8 stat_low; /* status word */ - u8 stat_high; /* status word */ - u8 rfd_sf; /* 82596 mode only */ - u8 last; /* Bit15,Last Frame on List / Bit14,suspend */ - u16 next; /* linkoffset to next RFD */ - u16 rbd_offset; /* pointeroffset to RBD-buffer */ - u8 dest[6]; /* ethernet-address, destination */ - u8 source[6]; /* ethernet-address, source */ - u16 length; /* 802.3 frame-length */ - u16 zero_dummy; /* dummy */ -}; - -#define RFD_LAST 0x80 /* last: last rfd in the list */ -#define RFD_SUSP 0x40 /* last: suspend RU after */ -#define RFD_COMPL 0x80 -#define RFD_OK 0x20 -#define RFD_BUSY 0x40 -#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */ -#define RFD_ERR_CRC 0x08 /* CRC error */ -#define RFD_ERR_ALGN 0x04 /* Alignment error */ -#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */ -#define RFD_ERR_OVR 0x01 /* DMA Overrun! */ - -#define RFD_ERR_FTS 0x0080 /* Frame to short */ -#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */ -#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */ -#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */ -#define RFD_COLLDET 0x0001 /* Detected collision during reception */ - -/* - * Receive Buffer Descriptor (RBD) - */ -struct rbd_struct -{ - u16 status; /* status word,number of used bytes in buff */ - u16 next; /* pointeroffset to next RBD */ - u32 buffer; /* receive buffer address pointer */ - u16 size; /* size of this buffer */ - u16 zero_dummy; /* dummy */ -}; - -#define RBD_LAST 0x8000 /* last buffer */ -#define RBD_USED 0x4000 /* this buffer has data */ -#define RBD_MASK 0x3fff /* size-mask for length */ - -/* - * Statusvalues for Commands/RFD - */ -#define STAT_COMPL 0x8000 /* status: frame/command is complete */ -#define STAT_BUSY 0x4000 /* status: frame/command is busy */ -#define STAT_OK 0x2000 /* status: frame/command is ok */ - -/* - * Action-Commands - */ -#define CMD_NOP 0x0000 /* NOP */ -#define CMD_IASETUP 0x0001 /* initial address setup command */ -#define CMD_CONFIGURE 0x0002 /* configure command */ -#define CMD_MCSETUP 0x0003 /* MC setup command */ -#define CMD_XMIT 0x0004 /* transmit command */ -#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */ -#define CMD_DUMP 0x0006 /* dump command */ -#define CMD_DIAGNOSE 0x0007 /* diagnose command */ - -/* - * Action command bits - */ -#define CMD_LAST 0x8000 /* indicates last command in the CBL */ -#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */ -#define CMD_INT 0x2000 /* generate interrupt after execution */ - -/* - * NOP - command - */ -struct nop_cmd_struct -{ - u16 cmd_status; /* status of this command */ - u16 cmd_cmd; /* the command itself (+bits) */ - u16 cmd_link; /* offsetpointer to next command */ -}; - -/* - * IA Setup command 
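/*
 * Editorial note (not part of the original header): the ACK_* acknowledge
 * values mirror the STAT_* interrupt-cause bits defined above, which is why
 * ni52.c can acknowledge every pending event at once with
 *	writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
 * A stand-alone check of that equivalence (values copied from this header):
 */
#include <assert.h>

int main(void)
{
	enum { ACK_CX = 0x80, ACK_FR = 0x40, ACK_CNA = 0x20, ACK_RNR = 0x10,
	       STAT_CX = 0x80, STAT_FR = 0x40, STAT_CNA = 0x20,
	       STAT_RNR = 0x10 };

	assert(ACK_CX == STAT_CX && ACK_FR == STAT_FR &&
	       ACK_CNA == STAT_CNA && ACK_RNR == STAT_RNR);
	return 0;
}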
- */ -struct iasetup_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u8 iaddr[6]; -}; - -/* - * Configure command - */ -struct configure_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u8 byte_cnt; /* size of the config-cmd */ - u8 fifo; /* fifo/recv monitor */ - u8 sav_bf; /* save bad frames (bit7=1)*/ - u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ - u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ - u8 ifs; /* inter frame spacing */ - u8 time_low; /* slot time low */ - u8 time_high; /* slot time high(0-2) and max. retries(4-7) */ - u8 promisc; /* promisc-mode(0) , et al (1-7) */ - u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */ - u8 fram_len; /* minimal frame len */ - u8 dummy; /* dummy */ -}; - -/* - * Multicast Setup command - */ -struct mcsetup_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 mc_cnt; /* number of bytes in the MC-List */ - u8 mc_list[0][6]; /* pointer to 6 bytes entries */ -}; - -/* - * DUMP command - */ -struct dump_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 dump_offset; /* pointeroffset to DUMP space */ -}; - -/* - * transmit command - */ -struct transmit_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 tbd_offset; /* pointeroffset to TBD */ - u8 dest[6]; /* destination address of the frame */ - u16 length; /* user defined: 802.3 length / Ether type */ -}; - -#define TCMD_ERRMASK 0x0fa0 -#define TCMD_MAXCOLLMASK 0x000f -#define TCMD_MAXCOLL 0x0020 -#define TCMD_HEARTBEAT 0x0040 -#define TCMD_DEFERRED 0x0080 -#define TCMD_UNDERRUN 0x0100 -#define TCMD_LOSTCTS 0x0200 -#define TCMD_NOCARRIER 0x0400 -#define TCMD_LATECOLL 0x0800 - -struct tdr_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 status; -}; - -#define TDR_LNK_OK 0x8000 /* No link problem identified */ -#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */ -#define TDR_ET_OPN 0x2000 /* open, no correct termination */ -#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */ -#define TDR_TIMEMASK 0x07ff /* mask for the time field */ - -/* - * Transmit Buffer Descriptor (TBD) - */ -struct tbd_struct -{ - u16 size; /* size + EOF-Flag(15) */ - u16 next; /* pointeroffset to next TBD */ - u32 buffer; /* pointer to buffer */ -}; - -#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ - - - - diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c deleted file mode 100644 index c9479e081b8a..000000000000 --- a/drivers/net/ethernet/i825xx/znet.c +++ /dev/null @@ -1,928 +0,0 @@ -/* znet.c: An Zenith Z-Note ethernet driver for linux. */ - -/* - Written by Donald Becker. - - The author may be reached as becker@scyld.com. - This driver is based on the Linux skeleton driver. The copyright of the - skeleton driver is held by the United States Government, as represented - by DIRNSA, and it is released under the GPL. - - Thanks to Mike Hollick for alpha testing and suggestions. - - References: - The Crynwr packet driver. - - "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992 - Intel Microcommunications Databook, Vol. 1, 1990. - As usual with Intel, the documentation is incomplete and inaccurate. - I had to read the Crynwr packet driver to figure out how to actually - use the i82593, and guess at what register bits matched the loosely - related i82586. - - Theory of Operation - - The i82593 used in the Zenith Z-Note series operates using two(!) slave - DMA channels, one interrupt, and one 8-bit I/O port. 
- - While there several ways to configure '593 DMA system, I chose the one - that seemed commensurate with the highest system performance in the face - of moderate interrupt latency: Both DMA channels are configured as - recirculating ring buffers, with one channel (#0) dedicated to Rx and - the other channel (#1) to Tx and configuration. (Note that this is - different than the Crynwr driver, where the Tx DMA channel is initialized - before each operation. That approach simplifies operation and Tx error - recovery, but requires additional I/O in normal operation and precludes - transmit buffer chaining.) - - Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides - a reasonable ring size for Rx, while simplifying DMA buffer allocation -- - DMA buffers must not cross a 128K boundary. (In truth the size selection - was influenced by my lack of '593 documentation. I thus was constrained - to use the Crynwr '593 initialization table, which sets the Rx ring size - to 8K.) - - Despite my usual low opinion about Intel-designed parts, I must admit - that the bulk data handling of the i82593 is a good design for - an integrated system, like a laptop, where using two slave DMA channels - doesn't pose a problem. I still take issue with using only a single I/O - port. In the same controlled environment there are essentially no - limitations on I/O space, and using multiple locations would eliminate - the need for multiple operations when looking at status registers, - setting the Rx ring boundary, or switching to promiscuous mode. - - I also question Zenith's selection of the '593: one of the advertised - advantages of earlier Intel parts was that if you figured out the magic - initialization incantation you could use the same part on many different - network types. Zenith's use of the "FriendlyNet" (sic) connector rather - than an on-board transceiver leads me to believe that they were planning - to take advantage of this. But, uhmmm, the '593 omits all but ethernet - functionality from the serial subsystem. - */ - -/* 10/2002 - - o Resurected for Linux 2.5+ by Marc Zyngier <maz@wild-wind.fr.eu.org> : - - - Removed strange DMA snooping in znet_sent_packet, which lead to - TX buffer corruption on my laptop. - - Use init_etherdev stuff. - - Use kmalloc-ed DMA buffers. - - Use as few global variables as possible. - - Use proper resources management. - - Use wireless/i82593.h as much as possible (structure, constants) - - Compiles as module or build-in. - - Now survives unplugging/replugging cable. - - Some code was taken from wavelan_cs. - - Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on - anything else. Testers (and detailed bug reports) are welcome :-). - - o TODO : - - - Properly handle multicast - - Understand why some traffic patterns add a 1s latency... 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/if_arp.h> -#include <linux/bitops.h> - -#include <asm/io.h> -#include <asm/dma.h> - -#include <linux/i82593.h> - -static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n"; - -#ifndef ZNET_DEBUG -#define ZNET_DEBUG 1 -#endif -static unsigned int znet_debug = ZNET_DEBUG; -module_param (znet_debug, int, 0); -MODULE_PARM_DESC (znet_debug, "ZNet debug level"); -MODULE_LICENSE("GPL"); - -/* The DMA modes we need aren't in <dma.h>. */ -#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */ -#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */ -#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17) -#define RX_BUF_SIZE 8192 -#define TX_BUF_SIZE 8192 -#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */ - -#define TX_TIMEOUT (HZ/10) - -struct znet_private { - int rx_dma, tx_dma; - spinlock_t lock; - short sia_base, sia_size, io_size; - struct i82593_conf_block i593_init; - /* The starting, current, and end pointers for the packet buffers. */ - ushort *rx_start, *rx_cur, *rx_end; - ushort *tx_start, *tx_cur, *tx_end; - ushort tx_buf_len; /* Tx buffer length, in words. */ -}; - -/* Only one can be built-in;-> */ -static struct net_device *znet_dev; - -#define NETIDBLK_MAGIC "NETIDBLK" -#define NETIDBLK_MAGIC_SIZE 8 - -struct netidblk { - char magic[NETIDBLK_MAGIC_SIZE]; /* The magic number (string) "NETIDBLK" */ - unsigned char netid[8]; /* The physical station address */ - char nettype, globalopt; - char vendor[8]; /* The machine vendor and product name. */ - char product[8]; - char irq1, irq2; /* Interrupts, only one is currently used. */ - char dma1, dma2; - short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */ - short iobase1, iosize1; - short iobase2, iosize2; /* Second iobase unused. */ - char driver_options; /* Misc. bits */ - char pad; -}; - -static int znet_open(struct net_device *dev); -static netdev_tx_t znet_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t znet_interrupt(int irq, void *dev_id); -static void znet_rx(struct net_device *dev); -static int znet_close(struct net_device *dev); -static void hardware_init(struct net_device *dev); -static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset); -static void znet_tx_timeout (struct net_device *dev); - -/* Request needed resources */ -static int znet_request_resources (struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - - if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev)) - goto failed; - if (request_dma (znet->rx_dma, "ZNet rx")) - goto free_irq; - if (request_dma (znet->tx_dma, "ZNet tx")) - goto free_rx_dma; - if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA")) - goto free_tx_dma; - if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O")) - goto free_sia; - - return 0; /* Happy ! 
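/*
 * Editorial sketch (not part of the original driver): what the DMA_RX_MODE
 * and DMA_TX_MODE values above encode.  This assumes the standard 8237 ISA
 * DMA mode-register layout (bits 7-6 mode, bit 5 address decrement, bit 4
 * auto-init, bits 3-2 transfer direction, bits 1-0 channel), which is
 * background knowledge rather than something stated in this file.
 */
#include <stdio.h>

static void decode(const char *name, unsigned char mode)
{
	static const char *dir[]  = { "verify", "I/O->mem", "mem->I/O", "?" };
	static const char *kind[] = { "demand", "single", "block", "cascade" };

	printf("%s: %s mode, %s, address %s, auto-init %s\n", name,
	       kind[mode >> 6], dir[(mode >> 2) & 3],
	       (mode & 0x20) ? "decrement" : "increment",
	       (mode & 0x10) ? "on" : "off");
}

int main(void)
{
	decode("DMA_RX_MODE (0x14)", 0x14);	/* demand, I/O->mem, auto-init */
	decode("DMA_TX_MODE (0x18)", 0x18);	/* demand, mem->I/O, auto-init */
	return 0;
}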
*/ - - free_sia: - release_region (znet->sia_base, znet->sia_size); - free_tx_dma: - free_dma (znet->tx_dma); - free_rx_dma: - free_dma (znet->rx_dma); - free_irq: - free_irq (dev->irq, dev); - failed: - return -1; -} - -static void znet_release_resources (struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - - release_region (znet->sia_base, znet->sia_size); - release_region (dev->base_addr, znet->io_size); - free_dma (znet->tx_dma); - free_dma (znet->rx_dma); - free_irq (dev->irq, dev); -} - -/* Keep the magical SIA stuff in a single function... */ -static void znet_transceiver_power (struct net_device *dev, int on) -{ - struct znet_private *znet = netdev_priv(dev); - unsigned char v; - - /* Turn on/off the 82501 SIA, using zenith-specific magic. */ - /* Select LAN control register */ - outb(0x10, znet->sia_base); - - if (on) - v = inb(znet->sia_base + 1) | 0x84; - else - v = inb(znet->sia_base + 1) & ~0x84; - - outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */ -} - -/* Init the i82593, with current promisc/mcast configuration. - Also used from hardware_init. */ -static void znet_set_multicast_list (struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - short ioaddr = dev->base_addr; - struct i82593_conf_block *cfblk = &znet->i593_init; - - memset(cfblk, 0x00, sizeof(struct i82593_conf_block)); - - /* The configuration block. What an undocumented nightmare. - The first set of values are those suggested (without explanation) - for ethernet in the Intel 82586 databook. The rest appear to be - completely undocumented, except for cryptic notes in the Crynwr - packet driver. This driver uses the Crynwr values verbatim. */ - - /* maz : Rewritten to take advantage of the wanvelan includes. - At least we have names, not just blind values */ - - /* Byte 0 */ - cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */ - cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */ - cfblk->fifo_32 = 1; - cfblk->d6mod = 0; /* Run in i82593 advanced mode */ - cfblk->throttle_enb = 1; - - /* Byte 1 */ - cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */ - cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */ - cfblk->contin = 1; /* enable continuous mode */ - - /* Byte 2 */ - cfblk->addr_len = ETH_ALEN; - cfblk->acloc = 1; /* Disable source addr insertion by i82593 */ - cfblk->preamb_len = 2; /* 8 bytes preamble */ - cfblk->loopback = 0; /* Loopback off */ - - /* Byte 3 */ - cfblk->lin_prio = 0; /* Default priorities & backoff methods. */ - cfblk->tbofstop = 0; - cfblk->exp_prio = 0; - cfblk->bof_met = 0; - - /* Byte 4 */ - cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */ - - /* Byte 5 */ - cfblk->slottim_low = 0; /* 512 bit times slot time (low) */ - - /* Byte 6 */ - cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */ - cfblk->max_retr = 15; /* 15 collisions retries */ - - /* Byte 7 */ - cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 
1 : 0); /* Promiscuous mode */ - cfblk->bc_dis = 0; /* Enable broadcast reception */ - cfblk->crs_1 = 0; /* Don't transmit without carrier sense */ - cfblk->nocrc_ins = 0; /* i82593 generates CRC */ - cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */ - cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */ - - /* Byte 8 */ - cfblk->cs_filter = 0; /* CS is recognized immediately */ - cfblk->crs_src = 0; /* External carrier sense */ - cfblk->cd_filter = 0; /* CD is recognized immediately */ - - /* Byte 9 */ - cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */ - - /* Byte A */ - cfblk->lng_typ = 1; /* Type/length checks OFF */ - cfblk->lng_fld = 1; /* Disable 802.3 length field check */ - cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */ - cfblk->artx = 1; /* Disable automatic retransmission */ - cfblk->sarec = 1; /* Disable source addr trig of CD */ - cfblk->tx_jabber = 0; /* Disable jabber jam sequence */ - cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */ - cfblk->lbpkpol = 0; /* Loopback pin active high */ - - /* Byte B */ - cfblk->fdx = 0; /* Disable full duplex operation */ - - /* Byte C */ - cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */ - cfblk->mult_ia = 0; /* No multiple individual addresses */ - cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */ - - /* Byte D */ - cfblk->dummy_1 = 1; /* set to 1 */ - cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */ - cfblk->mc_all = (!netdev_mc_empty(dev) || - (dev->flags & IFF_ALLMULTI)); /* multicast all mode */ - cfblk->rcv_mon = 0; /* Monitor mode disabled */ - cfblk->frag_acpt = 0; /* Do not accept fragments */ - cfblk->tstrttrs = 0; /* No start transmission threshold */ - - /* Byte E */ - cfblk->fretx = 1; /* FIFO automatic retransmission */ - cfblk->runt_eop = 0; /* drop "runt" packets */ - cfblk->hw_sw_pin = 0; /* ?? */ - cfblk->big_endn = 0; /* Big Endian ? no... */ - cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */ - cfblk->sttlen = 1; /* 6 byte status registers */ - cfblk->rx_eop = 0; /* Signal EOP on packet reception */ - cfblk->tx_eop = 0; /* Signal EOP on packet transmission */ - - /* Byte F */ - cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */ - cfblk->rcvstop = 1; /* Enable Receive Stop Register */ - - if (znet_debug > 2) { - int i; - unsigned char *c; - - for (i = 0, c = (char *) cfblk; i < sizeof (*cfblk); i++) - printk ("%02X ", c[i]); - printk ("\n"); - } - - *znet->tx_cur++ = sizeof(struct i82593_conf_block); - memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block)); - znet->tx_cur += sizeof(struct i82593_conf_block)/2; - outb(OP0_CONFIGURE | CR0_CHNL, ioaddr); - - /* XXX FIXME maz : Add multicast addresses here, so having a - * multicast address configured isn't equal to IFF_ALLMULTI */ -} - -static const struct net_device_ops znet_netdev_ops = { - .ndo_open = znet_open, - .ndo_stop = znet_close, - .ndo_start_xmit = znet_send_packet, - .ndo_set_rx_mode = znet_set_multicast_list, - .ndo_tx_timeout = znet_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe - BIOS area. We just scan for the signature, and pull the vital parameters - out of the structure. 
*/ - -static int __init znet_probe (void) -{ - int i; - struct netidblk *netinfo; - struct znet_private *znet; - struct net_device *dev; - char *p; - char *plast = phys_to_virt(0x100000 - NETIDBLK_MAGIC_SIZE); - int err = -ENOMEM; - - /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */ - for(p = (char *)phys_to_virt(0xf0000); p <= plast; p++) - if (*p == 'N' && - strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0) - break; - - if (p > plast) { - if (znet_debug > 1) - printk(KERN_INFO "No Z-Note ethernet adaptor found.\n"); - return -ENODEV; - } - - dev = alloc_etherdev(sizeof(struct znet_private)); - if (!dev) - return -ENOMEM; - - znet = netdev_priv(dev); - - netinfo = (struct netidblk *)p; - dev->base_addr = netinfo->iobase1; - dev->irq = netinfo->irq1; - - /* The station address is in the "netidblk" at 0x0f0000. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = netinfo->netid[i]; - - printk(KERN_INFO "%s: ZNET at %#3lx, %pM" - ", using IRQ %d DMA %d and %d.\n", - dev->name, dev->base_addr, dev->dev_addr, - dev->irq, netinfo->dma1, netinfo->dma2); - - if (znet_debug > 1) { - printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n", - dev->name, netinfo->vendor, - netinfo->irq1, netinfo->irq2, - netinfo->dma1, netinfo->dma2); - printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n", - dev->name, netinfo->iobase1, netinfo->iosize1, - netinfo->iobase2, netinfo->iosize2, netinfo->nettype); - } - - if (znet_debug > 0) - printk(KERN_INFO "%s", version); - - znet->rx_dma = netinfo->dma1; - znet->tx_dma = netinfo->dma2; - spin_lock_init(&znet->lock); - znet->sia_base = 0xe6; /* Magic address for the 82501 SIA */ - znet->sia_size = 2; - /* maz: Despite the '593 being advertised above as using a - * single 8bits I/O port, this driver does many 16bits - * access. So set io_size accordingly */ - znet->io_size = 2; - - if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA))) - goto free_dev; - if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA))) - goto free_rx; - - if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) || - !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) { - printk (KERN_WARNING "tx/rx crossing DMA frontiers, giving up\n"); - goto free_tx; - } - - znet->rx_end = znet->rx_start + RX_BUF_SIZE/2; - znet->tx_buf_len = TX_BUF_SIZE/2; - znet->tx_end = znet->tx_start + znet->tx_buf_len; - - /* The ZNET-specific entries in the device structure. */ - dev->netdev_ops = &znet_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - err = register_netdev(dev); - if (err) - goto free_tx; - znet_dev = dev; - return 0; - - free_tx: - kfree(znet->tx_start); - free_rx: - kfree(znet->rx_start); - free_dev: - free_netdev(dev); - return err; -} - - -static int znet_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (znet_debug > 2) - printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name); - - /* These should never fail. You can't add devices to a sealed box! */ - if (znet_request_resources (dev)) { - printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name); - return -EBUSY; - } - - znet_transceiver_power (dev, 1); - - /* According to the Crynwr driver we should wait 50 msec. for the - LAN clock to stabilize. My experiments indicates that the '593 can - be initialized immediately. The delay is probably needed for the - DC-to-DC converter to come up to full voltage, and for the oscillator - to be spot-on at 20Mhz before transmitting. 
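/*
 * Editorial sketch (not part of the original driver): the constraint that
 * the dma_page_eq() checks in znet_probe() above enforce.  As the driver's
 * own comments note, the DMA ring buffers must not cross a 128K (1 << 17)
 * boundary, so both ends of each ring must fall in the same 128K page.
 * Stand-alone illustration with made-up addresses.
 */
#include <stdio.h>

#define dma_page_eq(p1, p2) ((long)(p1) >> 17 == (long)(p2) >> 17)

int main(void)
{
	long start = 0x23000;		/* pretend buffer start             */
	long ok    = start + 0x1fff;	/* still below 0x40000              */
	long bad   = 0x40010;		/* just past the 128K line          */

	printf("same 128K page: %d\n", dma_page_eq(start, ok));		/* 1 */
	printf("same 128K page: %d\n", dma_page_eq(start, bad));	/* 0 */
	return 0;
}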
- Until this proves to be a problem we rely on the higher layers for the - delay and save allocating a timer entry. */ - - /* maz : Well, I'm getting every time the following message - * without the delay on a 486@33. This machine is much too - * fast... :-) So maybe the Crynwr driver wasn't wrong after - * all, even if the message is completly harmless on my - * setup. */ - mdelay (50); - - /* This follows the packet driver's lead, and checks for success. */ - if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00) - printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n", - dev->name); - - hardware_init(dev); - netif_start_queue (dev); - - return 0; -} - - -static void znet_tx_timeout (struct net_device *dev) -{ - int ioaddr = dev->base_addr; - ushort event, tx_status, rx_offset, state; - - outb (CR0_STATUS_0, ioaddr); - event = inb (ioaddr); - outb (CR0_STATUS_1, ioaddr); - tx_status = inw (ioaddr); - outb (CR0_STATUS_2, ioaddr); - rx_offset = inw (ioaddr); - outb (CR0_STATUS_3, ioaddr); - state = inb (ioaddr); - printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x," - " resetting.\n", dev->name, event, tx_status, rx_offset, state); - if (tx_status == TX_LOST_CRS) - printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n", - dev->name); - outb (OP0_RESET, ioaddr); - hardware_init (dev); - netif_wake_queue (dev); -} - -static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct znet_private *znet = netdev_priv(dev); - unsigned long flags; - short length = skb->len; - - if (znet_debug > 4) - printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name); - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - - netif_stop_queue (dev); - - /* Check that the part hasn't reset itself, probably from suspend. */ - outb(CR0_STATUS_0, ioaddr); - if (inw(ioaddr) == 0x0010 && - inw(ioaddr) == 0x0000 && - inw(ioaddr) == 0x0010) { - if (znet_debug > 1) - printk (KERN_WARNING "%s : waking up\n", dev->name); - hardware_init(dev); - znet_transceiver_power (dev, 1); - } - - if (1) { - unsigned char *buf = (void *)skb->data; - ushort *tx_link = znet->tx_cur - 1; - ushort rnd_len = (length + 1)>>1; - - dev->stats.tx_bytes+=length; - - if (znet->tx_cur >= znet->tx_end) - znet->tx_cur = znet->tx_start; - *znet->tx_cur++ = length; - if (znet->tx_cur + rnd_len + 1 > znet->tx_end) { - int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */ - memcpy(znet->tx_cur, buf, semi_cnt); - rnd_len -= semi_cnt>>1; - memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt); - znet->tx_cur = znet->tx_start + rnd_len; - } else { - memcpy(znet->tx_cur, buf, skb->len); - znet->tx_cur += rnd_len; - } - *znet->tx_cur++ = 0; - - spin_lock_irqsave(&znet->lock, flags); - { - *tx_link = OP0_TRANSMIT | CR0_CHNL; - /* Is this always safe to do? */ - outb(OP0_TRANSMIT | CR0_CHNL, ioaddr); - } - spin_unlock_irqrestore (&znet->lock, flags); - - netif_start_queue (dev); - - if (znet_debug > 4) - printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length); - } - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* The ZNET interrupt handler. 
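/*
 * Editorial sketch (not part of the original driver): the shape of the
 * record that znet_send_packet() above writes into the Tx DMA ring: a word
 * count, the payload padded to whole 16-bit words, and a zero terminator;
 * the terminator left behind by the previous record is then overwritten
 * with the OP0_TRANSMIT | CR0_CHNL command word before the command is also
 * issued through the I/O port.  Stand-alone illustration with a fake
 * payload and a stand-in value for the command word (the real value comes
 * from <linux/i82593.h>).
 */
#include <stdio.h>
#include <string.h>

#define FAKE_TRANSMIT_CMD 0xffff	/* stand-in for OP0_TRANSMIT|CR0_CHNL */

int main(void)
{
	unsigned short ring[16] = { 0 };
	unsigned short *cur = ring + 1;		/* pretend ring write pointer  */
	unsigned short *tx_link = cur - 1;	/* previous record's terminator */
	const unsigned char data[6] = "hello";
	unsigned short len = sizeof(data);
	int i;

	*cur++ = len;				/* word 0: byte count          */
	memcpy(cur, data, len);			/* payload, padded to words    */
	cur += (len + 1) >> 1;
	*cur++ = 0;				/* terminator for the next one */

	*tx_link = FAKE_TRANSMIT_CMD;		/* arm the previous terminator */

	for (i = 0; i < (int)(cur - ring); i++)
		printf("%04x ", ring[i]);	/* dump the raw ring words     */
	printf("\n");
	return 0;
}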
*/ -static irqreturn_t znet_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct znet_private *znet = netdev_priv(dev); - int ioaddr; - int boguscnt = 20; - int handled = 0; - - spin_lock (&znet->lock); - - ioaddr = dev->base_addr; - - outb(CR0_STATUS_0, ioaddr); - do { - ushort status = inb(ioaddr); - if (znet_debug > 5) { - ushort result, rx_ptr, running; - outb(CR0_STATUS_1, ioaddr); - result = inw(ioaddr); - outb(CR0_STATUS_2, ioaddr); - rx_ptr = inw(ioaddr); - outb(CR0_STATUS_3, ioaddr); - running = inb(ioaddr); - printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n", - dev->name, status, result, rx_ptr, running, boguscnt); - } - if ((status & SR0_INTERRUPT) == 0) - break; - - handled = 1; - - if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE || - (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE || - (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) { - int tx_status; - outb(CR0_STATUS_1, ioaddr); - tx_status = inw(ioaddr); - /* It's undocumented, but tx_status seems to match the i82586. */ - if (tx_status & TX_OK) { - dev->stats.tx_packets++; - dev->stats.collisions += tx_status & TX_NCOL_MASK; - } else { - if (tx_status & (TX_LOST_CTS | TX_LOST_CRS)) - dev->stats.tx_carrier_errors++; - if (tx_status & TX_UND_RUN) - dev->stats.tx_fifo_errors++; - if (!(tx_status & TX_HRT_BEAT)) - dev->stats.tx_heartbeat_errors++; - if (tx_status & TX_MAX_COL) - dev->stats.tx_aborted_errors++; - /* ...and the catch-all. */ - if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) - dev->stats.tx_errors++; - - /* Transceiver may be stuck if cable - * was removed while emitting a - * packet. Flip it off, then on to - * reset it. This is very empirical, - * but it seems to work. */ - - znet_transceiver_power (dev, 0); - znet_transceiver_power (dev, 1); - } - netif_wake_queue (dev); - } - - if ((status & SR0_RECEPTION) || - (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) { - znet_rx(dev); - } - /* Clear the interrupts we've handled. */ - outb(CR0_INT_ACK, ioaddr); - } while (boguscnt--); - - spin_unlock (&znet->lock); - - return IRQ_RETVAL(handled); -} - -static void znet_rx(struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - int ioaddr = dev->base_addr; - int boguscount = 1; - short next_frame_end_offset = 0; /* Offset of next frame start. */ - short *cur_frame_end; - short cur_frame_end_offset; - - outb(CR0_STATUS_2, ioaddr); - cur_frame_end_offset = inw(ioaddr); - - if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) { - printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n", - dev->name, cur_frame_end_offset); - return; - } - - /* Use same method as the Crynwr driver: construct a forward list in - the same area of the backwards links we now have. This allows us to - pass packets to the upper layers in the order they were received -- - important for fast-path sequential operations. */ - while (znet->rx_start + cur_frame_end_offset != znet->rx_cur && - ++boguscount < 5) { - unsigned short hi_cnt, lo_cnt, hi_status, lo_status; - int count, status; - - if (cur_frame_end_offset < 4) { - /* Oh no, we have a special case: the frame trailer wraps around - the end of the ring buffer. We've saved space at the end of - the ring buffer for just this problem. 
*/ - memcpy(znet->rx_end, znet->rx_start, 8); - cur_frame_end_offset += (RX_BUF_SIZE/2); - } - cur_frame_end = znet->rx_start + cur_frame_end_offset - 4; - - lo_status = *cur_frame_end++; - hi_status = *cur_frame_end++; - status = ((hi_status & 0xff) << 8) + (lo_status & 0xff); - lo_cnt = *cur_frame_end++; - hi_cnt = *cur_frame_end++; - count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff); - - if (znet_debug > 5) - printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x" - " count %#x status %04x.\n", - cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt, - count, status); - cur_frame_end[-4] = status; - cur_frame_end[-3] = next_frame_end_offset; - cur_frame_end[-2] = count; - next_frame_end_offset = cur_frame_end_offset; - cur_frame_end_offset -= ((count + 1)>>1) + 3; - if (cur_frame_end_offset < 0) - cur_frame_end_offset += RX_BUF_SIZE/2; - } - - /* Now step forward through the list. */ - do { - ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset; - int status = this_rfp_ptr[-4]; - int pkt_len = this_rfp_ptr[-2]; - - if (znet_debug > 5) - printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x" - " next %04x.\n", next_frame_end_offset<<1, status, pkt_len, - this_rfp_ptr[-3]<<1); - /* Once again we must assume that the i82586 docs apply. */ - if ( ! (status & RX_RCV_OK)) { /* There was an error. */ - dev->stats.rx_errors++; - if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++; - if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++; -#if 0 - if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */ - if (status & 0x0100) dev->stats.rx_fifo_errors++; -#else - /* maz : Wild guess... */ - if (status & RX_OVRRUN) dev->stats.rx_over_errors++; -#endif - if (status & RX_SRT_FRM) dev->stats.rx_length_errors++; - } else if (pkt_len > 1536) { - dev->stats.rx_length_errors++; - } else { - /* Malloc up new buffer. */ - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, pkt_len); - if (skb == NULL) { - if (znet_debug) - printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - break; - } - - if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { - int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; - memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt); - memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start, - pkt_len - semi_cnt); - } else { - memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len); - if (znet_debug > 6) { - unsigned int *packet = (unsigned int *) skb->data; - printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0], - packet[1], packet[2], packet[3]); - } - } - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - znet->rx_cur = this_rfp_ptr; - if (znet->rx_cur >= znet->rx_end) - znet->rx_cur -= RX_BUF_SIZE/2; - update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1); - next_frame_end_offset = this_rfp_ptr[-3]; - if (next_frame_end_offset == 0) /* Read all the frames? */ - break; /* Done for now */ - this_rfp_ptr = znet->rx_start + next_frame_end_offset; - } while (--boguscount); - - /* If any worth-while packets have been received, dev_rint() - has done a mark_bh(INET_BH) for us and will work on them - when we get to the bottom-half routine. */ -} - -/* The inverse routine to znet_open(). 
*/ -static int znet_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - netif_stop_queue (dev); - - outb(OP0_RESET, ioaddr); /* CMD0_RESET */ - - if (znet_debug > 1) - printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); - /* Turn off transceiver power. */ - znet_transceiver_power (dev, 0); - - znet_release_resources (dev); - - return 0; -} - -static void show_dma(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - unsigned char stat = inb (ioaddr); - struct znet_private *znet = netdev_priv(dev); - unsigned long flags; - short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE; - unsigned addr = inb(dma_port); - short residue; - - addr |= inb(dma_port) << 8; - residue = get_dma_residue(znet->tx_dma); - - if (znet_debug > 1) { - flags=claim_dma_lock(); - printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n", - stat, addr<<1, residue); - release_dma_lock(flags); - } -} - -/* Initialize the hardware. We have to do this when the board is open()ed - or when we come out of suspend mode. */ -static void hardware_init(struct net_device *dev) -{ - unsigned long flags; - short ioaddr = dev->base_addr; - struct znet_private *znet = netdev_priv(dev); - - znet->rx_cur = znet->rx_start; - znet->tx_cur = znet->tx_start; - - /* Reset the chip, and start it up. */ - outb(OP0_RESET, ioaddr); - - flags=claim_dma_lock(); - disable_dma(znet->rx_dma); /* reset by an interrupting task. */ - clear_dma_ff(znet->rx_dma); - set_dma_mode(znet->rx_dma, DMA_RX_MODE); - set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start)); - set_dma_count(znet->rx_dma, RX_BUF_SIZE); - enable_dma(znet->rx_dma); - /* Now set up the Tx channel. */ - disable_dma(znet->tx_dma); - clear_dma_ff(znet->tx_dma); - set_dma_mode(znet->tx_dma, DMA_TX_MODE); - set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start)); - set_dma_count(znet->tx_dma, znet->tx_buf_len<<1); - enable_dma(znet->tx_dma); - release_dma_lock(flags); - - if (znet_debug > 1) - printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n", - dev->name, znet->rx_start,znet->tx_start); - /* Do an empty configure command, just like the Crynwr driver. This - resets to chip to its default values. 
*/ - *znet->tx_cur++ = 0; - *znet->tx_cur++ = 0; - show_dma(dev); - outb(OP0_CONFIGURE | CR0_CHNL, ioaddr); - - znet_set_multicast_list (dev); - - *znet->tx_cur++ = 6; - memcpy(znet->tx_cur, dev->dev_addr, 6); - znet->tx_cur += 3; - show_dma(dev); - outb(OP0_IA_SETUP | CR0_CHNL, ioaddr); - show_dma(dev); - - update_stop_hit(ioaddr, 8192); - if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n"); - outb(OP0_RCV_ENABLE, ioaddr); - netif_start_queue (dev); -} - -static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset) -{ - outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr); - if (znet_debug > 5) - printk(KERN_DEBUG "Updating stop hit with value %02x.\n", - (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE); - outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr); - outb(OP1_SWIT_TO_PORT_0, ioaddr); -} - -static __exit void znet_cleanup (void) -{ - if (znet_dev) { - struct znet_private *znet = netdev_priv(znet_dev); - - unregister_netdev (znet_dev); - kfree (znet->rx_start); - kfree (znet->tx_start); - free_netdev (znet_dev); - } -} - -module_init (znet_probe); -module_exit (znet_cleanup); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 19b64de7124b..328f47c92e26 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -76,16 +76,16 @@ MODULE_PARM_DESC(msg_level, "msg_level"); MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical " "port to stack. 1:yes, 0:no. Default = 0 "); MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " - "[2^x - 1], x = [6..14]. Default = " + "[2^x - 1], x = [7..14]. Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")"); MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 " - "[2^x - 1], x = [6..14]. Default = " + "[2^x - 1], x = [7..14]. Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")"); MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 " - "[2^x - 1], x = [6..14]. Default = " + "[2^x - 1], x = [7..14]. Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")"); MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " - "[2^x - 1], x = [6..14]. Default = " + "[2^x - 1], x = [7..14]. 
Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, " "Default = 1"); @@ -1921,10 +1921,8 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) u64 hret; ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); - if (!ehea_mcl_entry) { - pr_err("no mem for mcl_entry\n"); + if (!ehea_mcl_entry) return; - } INIT_LIST_HEAD(&ehea_mcl_entry->list); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 27f881758d16..9b03033bb557 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -64,11 +64,10 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, } queue->queue_length = nr_of_pages * pagesize; - queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); - if (!queue->queue_pages) { - pr_err("no mem for queue_pages\n"); + queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *), + GFP_KERNEL); + if (!queue->queue_pages) return -ENOMEM; - } /* * allocate pages for queue: @@ -129,10 +128,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, void *vpage; cq = kzalloc(sizeof(*cq), GFP_KERNEL); - if (!cq) { - pr_err("no mem for cq\n"); + if (!cq) goto out_nomem; - } cq->attr.max_nr_of_cqes = nr_of_cqe; cq->attr.cq_token = cq_token; @@ -257,10 +254,8 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, struct ehea_eq *eq; eq = kzalloc(sizeof(*eq), GFP_KERNEL); - if (!eq) { - pr_err("no mem for eq\n"); + if (!eq) return NULL; - } eq->adapter = adapter; eq->attr.type = type; @@ -428,10 +423,8 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, qp = kzalloc(sizeof(*qp), GFP_KERNEL); - if (!qp) { - pr_err("no mem for qp\n"); + if (!qp) return NULL; - } qp->adapter = adapter; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 256bdb8e1994..4989481c19f0 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2190,11 +2190,10 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, { struct emac_instance *dev = netdev_priv(ndev); - strcpy(info->driver, "ibm_emac"); - strcpy(info->version, DRV_VERSION); - info->fw_version[0] = '\0'; - sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", - dev->cell_index, dev->ofdev->dev.of_node->full_name); + strlcpy(info->driver, "ibm_emac", sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s", + dev->cell_index, dev->ofdev->dev.of_node->full_name); info->regdump_len = emac_ethtool_get_regs_len(ndev); } diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 50ea12bfb579..1f7ecf57181e 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -528,12 +528,9 @@ static int mal_probe(struct platform_device *ofdev) irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde; mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL); - if (!mal) { - printk(KERN_ERR - "mal%d: out of memory allocating MAL structure!\n", - index); + if (!mal) return -ENOMEM; - } + mal->index = index; mal->ofdev = ofdev; mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 
2 : 1; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index f2fdbb79837e..c859771a9902 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -637,7 +637,6 @@ static int ibmveth_open(struct net_device *netdev) adapter->bounce_buffer = kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); if (!adapter->bounce_buffer) { - netdev_err(netdev, "unable to allocate bounce buffer\n"); rc = -ENOMEM; goto err_out_free_irq; } @@ -722,9 +721,8 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1); - strncpy(info->version, ibmveth_driver_version, - sizeof(info->version) - 1); + strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver)); + strlcpy(info->version, ibmveth_driver_version, sizeof(info->version)); } static netdev_features_t ibmveth_fix_features(struct net_device *dev, diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index ddee4060948a..3d5f6d463757 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_INTEL bool "Intel devices" default y depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ - ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ + ARCH_ACORN || SNI_RM || SUN3 || \ GSC || BVME6000 || MVME16x || \ (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ EXPERIMENTAL @@ -74,6 +74,7 @@ config E1000E tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" depends on PCI && (!SPARC32 || BROKEN) select CRC32 + select PTP_1588_CLOCK ---help--- This driver supports the PCI-Express Intel(R) PRO/1000 gigabit ethernet family of adapters. For PCI or PCI-X e1000 adapters, @@ -94,6 +95,8 @@ config IGB tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" depends on PCI select PTP_1588_CLOCK + select I2C + select I2C_ALGOBIT ---help--- This driver supports Intel(R) 82575/82576 gigabit ethernet family of adapters. For more information on how to identify your adapter, go @@ -112,6 +115,17 @@ config IGB To compile this driver as a module, choose M here. The module will be called igb. +config IGB_HWMON + bool "Intel(R) PCI-Express Gigabit adapters HWMON support" + default y + depends on IGB && HWMON && !(IGB=y && HWMON=m) + ---help--- + Say Y if you want to expose thermal sensor data on Intel devices. + + Some of our devices contain thermal sensors, both external and internal. + This data is available via the hwmon sysfs interface and exposes + the onboard sensors. 
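The IGB_HWMON help text above refers to the kernel's standard hwmon sysfs interface, where temperature attributes report millidegrees Celsius. As a minimal user-space sketch of reading one such sensor (the hwmon0 index and the temp1_input attribute name are placeholders here -- on a real system you would scan /sys/class/hwmon/ and match the device's "name" attribute):

/* Illustrative only: read a single hwmon temperature attribute from
 * user space and print it in degrees Celsius. The path is a
 * hypothetical example, not something the igb driver guarantees.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/temp1_input"; /* placeholder node */
	FILE *f = fopen(path, "r");
	long millideg;

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected contents in %s\n", path);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("sensor reading: %ld.%03ld degC\n",
	       millideg / 1000, labs(millideg % 1000));
	return EXIT_SUCCESS;
}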
+ config IGB_DCA bool "Direct Cache Access (DCA) Support" default y diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index a59f0779e1c3..ec800b093e7e 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2928,8 +2928,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e100_phy_init(nic); memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); - memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN); - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { if (!eeprom_bad_csum_allow) { netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); err = -EAGAIN; diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 2b6cd02bfba0..26d9cd59ec75 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -81,68 +81,69 @@ struct e1000_adapter; #include "e1000_hw.h" -#define E1000_MAX_INTR 10 +#define E1000_MAX_INTR 10 /* TX/RX descriptor defines */ -#define E1000_DEFAULT_TXD 256 -#define E1000_MAX_TXD 256 -#define E1000_MIN_TXD 48 -#define E1000_MAX_82544_TXD 4096 +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 -#define E1000_DEFAULT_RXD 256 -#define E1000_MAX_RXD 256 -#define E1000_MIN_RXD 48 -#define E1000_MAX_82544_RXD 4096 +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 #define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ #define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ /* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 /* Supported Rx Buffer Sizes */ -#define E1000_RXBUFFER_128 128 /* Used for packet split */ -#define E1000_RXBUFFER_256 256 /* Used for packet split */ -#define E1000_RXBUFFER_512 512 -#define E1000_RXBUFFER_1024 1024 -#define E1000_RXBUFFER_2048 2048 -#define E1000_RXBUFFER_4096 4096 -#define E1000_RXBUFFER_8192 8192 -#define E1000_RXBUFFER_16384 16384 +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 /* SmartSpeed delimiters */ -#define E1000_SMARTSPEED_DOWNSHIFT 3 -#define E1000_SMARTSPEED_MAX 15 +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 /* Packet Buffer allocations */ -#define E1000_PBA_BYTES_SHIFT 0xA -#define E1000_TX_HEAD_ADDR_SHIFT 7 -#define E1000_PBA_TX_MASK 0xFFFF0000 +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 /* Flow Control Watermarks */ -#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ -#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ -#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ /* How many Tx Descriptors do we need to call netif_wake_queue ? 
*/ #define E1000_TX_QUEUE_WAKE 16 /* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define AUTO_ALL_MODES 0 -#define E1000_EEPROM_82544_APM 0x0004 -#define E1000_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 #ifndef E1000_MASTER_SLAVE /* Switch to override PHY master/slave setting */ #define E1000_MASTER_SLAVE e1000_ms_hw_default #endif -#define E1000_MNG_VLAN_NONE (-1) +#define E1000_MNG_VLAN_NONE (-1) /* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ + * so a DMA handle can be stored along with the buffer + */ struct e1000_buffer { struct sk_buff *skb; dma_addr_t dma; diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 14e30515f6aa..43462d596a4e 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev, if (hw->media_type == e1000_media_type_copper) { ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full| - SUPPORTED_Autoneg | - SUPPORTED_TP); + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); ecmd->advertising = ADVERTISED_TP; if (hw->autoneg == 1) { @@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev, ethtool_cmd_speed_set(ecmd, adapter->link_speed); /* unfortunately FULL_DUPLEX != DUPLEX_FULL - * and HALF_DUPLEX != DUPLEX_HALF */ - + * and HALF_DUPLEX != DUPLEX_HALF + */ if (adapter->link_duplex == FULL_DUPLEX) ecmd->duplex = DUPLEX_FULL; else @@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev, if ((hw->media_type == e1000_media_type_copper) && netif_carrier_ok(netdev)) ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? - ETH_TP_MDI_X : - ETH_TP_MDI); + ETH_TP_MDI_X : ETH_TP_MDI); else ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; @@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* - * MDI setting is only allowed when autoneg enabled because + /* MDI setting is only allowed when autoneg enabled because * some hardware doesn't allow MDI setting when speed or * duplex is forced. */ @@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev, ADVERTISED_Autoneg; else hw->autoneg_advertised = ecmd->advertising | - ADVERTISED_TP | - ADVERTISED_Autoneg; + ADVERTISED_TP | + ADVERTISED_Autoneg; ecmd->advertising = hw->autoneg_advertised; } else { u32 speed = ethtool_cmd_speed(ecmd); @@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); - /* - * If the link is not reported up to netdev, interrupts are disabled, + /* If the link is not reported up to netdev, interrupts are disabled, * and so the physical link state may have changed since we last * looked. 
Set get_link_status to make sure that the true link * state is interrogated, rather than pulling a cached and possibly @@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev, le16_to_cpus(&eeprom_buff[i]); memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), - eeprom->len); + eeprom->len); kfree(eeprom_buff); return ret_val; @@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev, ptr = (void *)eeprom_buff; if (eeprom->offset & 1) { - /* need read/modify/write of first changed EEPROM word */ - /* only the second byte of the word is being modified */ + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ ret_val = e1000_read_eeprom(hw, first_word, 1, &eeprom_buff[0]); ptr++; } if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { - /* need read/modify/write of last changed EEPROM word */ - /* only the first byte of the word is being modified */ + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ ret_val = e1000_read_eeprom(hw, last_word, 1, &eeprom_buff[last_word - first_word]); } @@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev, rx_old = adapter->rx_ring; err = -ENOMEM; - txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL); + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); if (!txdr) goto err_alloc_tx; - rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL); + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); if (!rxdr) goto err_alloc_rx; @@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev, rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? - E1000_MAX_RXD : E1000_MAX_82544_RXD)); + E1000_MAX_RXD : E1000_MAX_82544_RXD)); rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? 
- E1000_MAX_TXD : E1000_MAX_82544_TXD)); + E1000_MAX_TXD : E1000_MAX_82544_TXD)); txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); for (i = 0; i < adapter->num_tx_queues; i++) @@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev, goto err_setup_tx; /* save the new, restore the old in order to free it, - * then restore the new back again */ + * then restore the new back again + */ adapter->rx_ring = rx_old; adapter->tx_ring = tx_old; @@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); if (hw->mac_type >= e1000_82543) { - REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); @@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 0xFFFFFFFF); } - } else { - REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); - } value = E1000_MC_TBL_SIZE; @@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) *data = 0; - /* NOTE: we don't test MSI interrupts here, yet */ - /* Hook up test interrupt handler just for this test */ + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, - netdev)) + netdev)) shared_int = false; else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, - netdev->name, netdev)) { + netdev->name, netdev)) { *data = 1; return -1; } @@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ - E1000_CTRL_FD); /* Force Duplex to FULL */ + E1000_CTRL_FD); /* Force Duplex to FULL */ if (hw->media_type == e1000_media_type_copper && hw->phy_type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ else { /* Set the ILOS bit on the fiber Nic is half - * duplex link is detected. */ + * duplex link is detected. 
+ */ stat_reg = er32(STATUS); if ((stat_reg & E1000_STATUS_FD) == 0) ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); @@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ret_val = e1000_check_lbtest_frame( rxdr->buffer_info[l].skb, - 1024); + 1024); if (!ret_val) good_cnt++; if (unlikely(++l == rxdr->count)) l = 0; @@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) hw->serdes_has_link = false; /* On some blade server designs, link establishment - * could take as long as 2-3 minutes */ + * could take as long as 2-3 minutes + */ do { e1000_check_for_link(hw); if (hw->serdes_has_link) @@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev, e_info(hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ + * interfere with test result + */ if (e1000_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, default: /* dual port cards only support WoL on port A from now on * unless it was enabled in the eeprom for port B - * so exclude FUNC_1 ports from having WoL enabled */ + * so exclude FUNC_1 ports from having WoL enabled + */ if (er32(STATUS) & E1000_STATUS_FUNC_1 && !adapter->eeprom_wol) { wol->supported = 0; @@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev, wol->wolopts = 0; /* this function will set ->supported = 0 and return 1 if wol is not - * supported by this hardware */ + * supported by this hardware + */ if (e1000_wol_exclusion(adapter, wol) || !device_can_wakeup(&adapter->pdev->dev)) return; @@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } -/* BUG_ON(i != E1000_STATS_LEN); */ +/* BUG_ON(i != E1000_STATS_LEN); */ } static void e1000_get_strings(struct net_device *netdev, u32 stringset, @@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } -/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ break; } } static const struct ethtool_ops e1000_ethtool_ops = { - .get_settings = e1000_get_settings, - .set_settings = e1000_set_settings, - .get_drvinfo = e1000_get_drvinfo, - .get_regs_len = e1000_get_regs_len, - .get_regs = e1000_get_regs, - .get_wol = e1000_get_wol, - .set_wol = e1000_set_wol, - .get_msglevel = e1000_get_msglevel, - .set_msglevel = e1000_set_msglevel, - .nway_reset = e1000_nway_reset, - .get_link = e1000_get_link, - .get_eeprom_len = e1000_get_eeprom_len, - .get_eeprom = e1000_get_eeprom, - .set_eeprom = e1000_set_eeprom, - .get_ringparam = e1000_get_ringparam, - .set_ringparam = e1000_set_ringparam, - .get_pauseparam = e1000_get_pauseparam, - .set_pauseparam = e1000_set_pauseparam, - .self_test = e1000_diag_test, - .get_strings = e1000_get_strings, - .set_phys_id = e1000_set_phys_id, - .get_ethtool_stats = e1000_get_ethtool_stats, - .get_sset_count = e1000_get_sset_count, - .get_coalesce = e1000_get_coalesce, - .set_coalesce = e1000_set_coalesce, + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, .get_ts_info = ethtool_op_get_ts_info, }; diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 8fedd2451538..2879b9631e15 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw) if (hw->phy_init_script) { msleep(20); - /* Save off the current value of register 0x2F5B to be restored at - * the end of this routine. */ + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); /* Disabled the PHY transmitter */ @@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw) case e1000_82541: case e1000_82541_rev_2: /* These controllers can't ack the 64-bit write when issuing the - * reset, so use IO-mapping as a workaround to issue the reset */ + * reset, so use IO-mapping as a workaround to issue the reset + */ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); break; case e1000_82545_rev_3: @@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw) break; } - /* After MAC reset, force reload of EEPROM to restore power-on settings to - * device. Later controllers reload the EEPROM automatically, so just wait - * for reload to complete. + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. */ switch (hw->mac_type) { case e1000_82542_rev2_0: @@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) msleep(5); } - /* Setup the receive address. This involves initializing all of the Receive - * Address Registers (RARs 0 - 15). + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). */ e1000_init_rx_addrs(hw); @@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) for (i = 0; i < mta_size; i++) { E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); /* use write flush to prevent Memory Write Block (MWB) from - * occurring when accessing our register space */ + * occurring when accessing our register space + */ E1000_WRITE_FLUSH(); } @@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw) case e1000_82546_rev_3: break; default: - /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048) e1000_pcix_set_mmrbc(hw, 2048); @@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { ctrl_ext = er32(CTRL_EXT); /* Relaxed ordering must be disabled to avoid a parity - * error crash in a PCI slot. */ + * error crash in a PCI slot. + */ ctrl_ext |= E1000_CTRL_EXT_RO_DIS; ew32(CTRL_EXT, ctrl_ext); } @@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw) ew32(FCRTL, 0); ew32(FCRTH, 0); } else { - /* We need to set up the Receive Threshold high and low water marks - * as well as (optionally) enabling the transmission of XON frames. + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. */ if (hw->fc_send_xon) { ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); @@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) e1000_config_collision_dist(hw); /* Check for a software override of the flow control settings, and setup - * the device accordingly. If auto-negotiation is enabled, then software - * will have to set the "PAUSE" bits to the correct value in the Tranmsit - * Config Word Register (TXCW) and re-start auto-negotiation. However, if - * auto-negotiation is disabled, then software will have to manually - * configure the two flow control enable bits in the CTRL register. + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. 
However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. * * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, but - * not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but we do - * not support receiving pause frames). - * 3: Both Rx and TX flow control (symmetric) are enabled. + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. */ switch (hw->fc) { case E1000_FC_NONE: - /* Flow control is completely disabled by a software over-ride. */ + /* Flow ctrl is completely disabled by a software over-ride */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); break; case E1000_FC_RX_PAUSE: - /* RX Flow control is enabled and TX Flow control is disabled by a - * software over-ride. Since there really isn't a way to advertise - * that we are capable of RX Pause ONLY, we will advertise that we - * support both symmetric and asymmetric RX PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; case E1000_FC_TX_PAUSE: - /* TX Flow control is enabled, and RX Flow control is disabled, by a - * software over-ride. + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); break; case E1000_FC_FULL: - /* Flow control (both RX and TX) is enabled by a software over-ride. */ + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; default: @@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) break; } - /* Since auto-negotiation is enabled, take the link out of reset (the link - * will be in reset, because we previously reset the chip). This will - * restart auto-negotiation. If auto-negotiation is successful then the - * link-up status bit will be set and the flow control enable bits (RFCE - * and TFCE) will be set according to their negotiated value. + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. */ e_dbg("Auto-negotiation enabled\n"); @@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) hw->txcw = txcw; msleep(1); - /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" - * indication in the Device Status Register. 
Time-out if a link isn't - * seen in 500 milliseconds seconds (Auto-negotiation should complete in - * less than 500 milliseconds even if the other end is doing it in SW). - * For internal serdes, we just assume a signal is present, then poll. + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. */ if (hw->media_type == e1000_media_type_internal_serdes || (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { @@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) e_dbg("Never got a valid link from auto-neg!!!\n"); hw->autoneg_failed = 1; /* AutoNeg failed to achieve a link, so we'll call - * e1000_check_for_link. This routine will force the link up if - * we detect a signal. This will allow us to communicate with - * non-autonegotiating link partners. + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. */ ret_val = e1000_check_for_link(hw); if (ret_val) { @@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) e_dbg("e1000_copper_link_preconfig"); ctrl = er32(CTRL); - /* With 82543, we need to force speed and duplex on the MAC equal to what - * the PHY speed and duplex configuration is. In addition, we need to - * perform a hardware reset on the PHY to take it out of reset. + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. */ if (hw->mac_type > e1000_82543) { ctrl |= E1000_CTRL_SLU; @@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) /* when autonegotiation advertisement is only 1000Mbps then we * should disable SmartSpeed and enable Auto MasterSlave - * resolution as hardware default. */ + * resolution as hardware default. + */ if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { /* Disable SmartSpeed */ ret_val = @@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw) if (hw->autoneg) { /* Setup autoneg and flow control advertisement - * and perform autonegotiation */ + * and perform autonegotiation + */ ret_val = e1000_copper_link_autoneg(hw); if (ret_val) return ret_val; } else { /* PHY will be set to 10H, 10F, 100H,or 100F - * depending on value from forced_speed_duplex. */ + * depending on value from forced_speed_duplex. + */ e_dbg("Forcing speed and duplex\n"); ret_val = e1000_phy_force_speed_duplex(hw); if (ret_val) { @@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) * setup the PHY advertisement registers accordingly. If * auto-negotiation is enabled, then software will have to set the * "PAUSE" bits to the correct value in the Auto-Negotiation - * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation. + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. * * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled @@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) * capable of RX Pause ONLY, we will advertise that we * support both symmetric and asymmetric RX PAUSE. 
Later * (in e1000_config_fc_after_link_up) we will disable the - *hw's ability to send PAUSE frames. + * hw's ability to send PAUSE frames. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; @@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) /* Are we forcing Full or Half Duplex? */ if (hw->forced_speed_duplex == e1000_100_full || hw->forced_speed_duplex == e1000_10_full) { - /* We want to force full duplex so we SET the full duplex bits in the - * Device and MII Control Registers. + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. */ ctrl |= E1000_CTRL_FD; mii_ctrl_reg |= MII_CR_FULL_DUPLEX; e_dbg("Full Duplex\n"); } else { - /* We want to force half duplex so we CLEAR the full duplex bits in - * the Device and MII Control Registers. + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. */ ctrl &= ~E1000_CTRL_FD; mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; @@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI - * forced whenever speed are duplex are forced. + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. */ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; ret_val = @@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) e_dbg("Waiting for forced speed/duplex link.\n"); mii_status_reg = 0; - /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + /* Wait for autoneg to complete or 4.5 seconds to expire */ for (i = PHY_FORCE_TIME; i > 0; i--) { - /* Read the MII Status Register and wait for Auto-Neg Complete bit - * to be set. + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); @@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) msleep(100); } if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { - /* We didn't get link. Reset the DSP and wait again for link. */ + /* We didn't get link. Reset the DSP and wait again + * for link. + */ ret_val = e1000_phy_reset_dsp(hw); if (ret_val) { e_dbg("Error Resetting PHY DSP\n"); return ret_val; } } - /* This loop will early-out if the link condition has been met. */ + /* This loop will early-out if the link condition has been + * met + */ for (i = PHY_FORCE_TIME; i > 0; i--) { if (mii_status_reg & MII_SR_LINK_STATUS) break; msleep(100); - /* Read the MII Status Register and wait for Auto-Neg Complete bit - * to be set. + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); @@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) } if (hw->phy_type == e1000_phy_m88) { - /* Because we reset the PHY above, we need to re-force TX_CLK in the - * Extended PHY Specific Control Register to 25MHz clock. This value - * defaults back to a 2.5MHz clock when the PHY is reset. + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. 
*/ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, @@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) if (ret_val) return ret_val; - /* In addition, because of the s/w reset above, we need to enable CRS on - * TX. This must be set for both full and half duplex operation. + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. */ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); @@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) e_dbg("e1000_config_mac_to_phy"); /* 82544 or newer MAC, Auto Speed Detection takes care of - * MAC speed/duplex configuration.*/ + * MAC speed/duplex configuration. + */ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) return E1000_SUCCESS; @@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) * registers depending on negotiated values. */ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, - &phy_data); + &phy_data); if (ret_val) return ret_val; @@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) ctrl |= E1000_CTRL_SPD_1000; else if ((phy_data & M88E1000_PSSR_SPEED) == - M88E1000_PSSR_100MBS) + M88E1000_PSSR_100MBS) ctrl |= E1000_CTRL_SPD_100; } @@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { /* The AutoNeg process has completed, so we now need to * read both the Auto Negotiation Advertisement Register - * (Address 4) and the Auto_Negotiation Base Page Ability - * Register (Address 5) to determine how flow control was - * negotiated. + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. */ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); @@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Two bits in the Auto Negotiation Advertisement Register - * (Address 4) and two bits in the Auto Negotiation Base - * Page Ability Register (Address 5) determine flow control - * for both the PHY and the link partner. The following - * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, - * 1999, describes these PAUSE resolution bits and how flow - * control is determined based upon these settings. + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. 
* NOTE: DC = Don't Care * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution - *-------|---------|-------|---------|-------------------- + *-------|---------|-------|---------|------------------ * 0 | 0 | DC | DC | E1000_FC_NONE * 0 | 1 | 0 | DC | E1000_FC_NONE * 0 | 1 | 1 | 0 | E1000_FC_NONE @@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- + *-------|---------|-------|---------|------------------ * 1 | DC | 1 | DC | E1000_FC_FULL * */ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { - /* Now we need to check if the user selected RX ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. */ if (hw->original_fc == E1000_FC_FULL) { hw->fc = E1000_FC_FULL; @@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- + *-------|---------|-------|---------|------------------ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE * */ @@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- + *-------|---------|-------|---------|------------------ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE * */ @@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) e_dbg ("Flow Control = RX PAUSE frames only.\n"); } - /* Per the IEEE spec, at this point flow control should be - * disabled. However, we want to consider that we could - * be connected to a legacy switch that doesn't advertise - * desired flow control, but can be forced on the link - * partner. So if we advertised no flow control, that is - * what we will resolve to. If we advertised some kind of - * receive capability (Rx Pause Only or Full Flow Control) - * and the link partner advertised none, we will configure - * ourselves to enable Rx Flow Control only. We can do - * this safely for two reasons: If the link partner really - * didn't want flow control enabled, and we enable Rx, no - * harm done since we won't be receiving any PAUSE frames - * anyway. If the intent on the link partner was to have - * flow control enabled, then by us enabling RX only, we - * can at least receive pause frames and process them. - * This is a good idea because in most cases, since we are - * predominantly a server NIC, more times than not we will - * be asked to delay transmission of packets than asking - * our link partner to pause transmission of frames. + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. 
If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. */ else if ((hw->original_fc == E1000_FC_NONE || hw->original_fc == E1000_FC_TX_PAUSE) || @@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) status = er32(STATUS); rxcw = er32(RXCW); - /* - * If we don't have link (auto-negotiation failed or link partner + /* If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), and our link partner is not trying to * auto-negotiate with us (we are receiving idles or data), * we need to force link up. We also need to give auto-negotiation @@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) goto out; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* - * If we are forcing link and we are receiving /C/ ordered + /* If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. @@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) hw->serdes_has_link = true; } else if (!(E1000_TXCW_ANE & er32(TXCW))) { - /* - * If we force link for non-auto-negotiation switch, check + /* If we force link for non-auto-negotiation switch, check * link status based on MAC synchronization for internal * serdes media type. */ @@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw) if (phy_data & MII_SR_LINK_STATUS) { hw->get_link_status = false; - /* Check if there was DownShift, must be checked immediately after - * link-up */ + /* Check if there was DownShift, must be checked + * immediately after link-up + */ e1000_check_downshift(hw); /* If we are on 82544 or 82543 silicon and speed/duplex - * are forced to 10H or 10F, then we will implement the polarity - * reversal workaround. We disable interrupts first, and upon - * returning, place the devices interrupt state to its previous - * value except for the link status change interrupt which will + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will * happen due to the execution of this workaround. */ @@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw) } } - /* Configure Flow Control now that Auto-Neg has completed. First, we - * need to restore the desired flow control settings because we may - * have had to re-autoneg with a different link partner. + /* Configure Flow Control now that Auto-Neg has completed. 
+ * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. */ ret_val = e1000_config_fc_after_link_up(hw); if (ret_val) { @@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw) } /* At this point we know that we are on copper and we have - * auto-negotiated link. These are conditions for checking the link - * partner capability register. We use the link speed to determine if - * TBI compatibility needs to be turned on or off. If the link is not - * at gigabit speed, then TBI compatibility is not needed. If we are - * at gigabit speed, we turn on TBI compatibility. + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. */ if (hw->tbi_compatibility_en) { u16 speed, duplex; @@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw) return ret_val; } if (speed != SPEED_1000) { - /* If link speed is not set to gigabit speed, we do not need - * to enable TBI compatibility. + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. */ if (hw->tbi_compatibility_on) { - /* If we previously were in the mode, turn it off. */ + /* If we previously were in the mode, + * turn it off. + */ rctl = er32(RCTL); rctl &= ~E1000_RCTL_SBP; ew32(RCTL, rctl); hw->tbi_compatibility_on = false; } } else { - /* If TBI compatibility is was previously off, turn it on. For - * compatibility with a TBI link partner, we will store bad - * packets. Some frames have an additional byte on the end and + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and * will look like CRC errors to to the hardware. */ if (!hw->tbi_compatibility_on) { @@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) *duplex = FULL_DUPLEX; } - /* IGP01 PHY may advertise full duplex operation after speed downgrade even - * if it is operating at half duplex. Here we set the duplex settings to - * match the duplex in the link partner's capabilities. + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. */ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); @@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) */ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) { - /* Raise the clock input to the Management Data Clock (by setting the MDC - * bit), and then delay 10 microseconds. + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. */ ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); E1000_WRITE_FLUSH(); @@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) */ static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) { - /* Lower the clock input to the Management Data Clock (by clearing the MDC - * bit), and then delay 10 microseconds. 
+ /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. */ ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); E1000_WRITE_FLUSH(); @@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); while (mask) { - /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and - * then raising and lowering the Management Data Clock. A "0" is - * shifted out to the PHY by setting the MDIO bit to "0" and then - * raising and lowering the clock. + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. */ if (data & mask) ctrl |= E1000_CTRL_MDIO; @@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) u8 i; /* In order to read a register from the PHY, we need to shift in a total - * of 18 bits from the PHY. The first two bit (turnaround) times are used - * to avoid contention on the MDIO pin when a read operation is performed. - * These two bits are ignored by us and thrown away. Bits are "shifted in" - * by raising the input to the Management Data Clock (setting the MDC bit), - * and then reading the value of the MDIO bit. + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. */ ctrl = er32(CTRL); - /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ ctrl &= ~E1000_CTRL_MDIO_DIR; ctrl &= ~E1000_CTRL_MDIO; ew32(CTRL, ctrl); E1000_WRITE_FLUSH(); - /* Raise and Lower the clock before reading in the data. This accounts for - * the turnaround bits. The first clock occurred when we clocked out the - * last bit of the Register Address. + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. The first clock occurred when we clocked out + * the last bit of the Register Address. */ e1000_raise_mdi_clk(hw, &ctrl); e1000_lower_mdi_clk(hw, &ctrl); @@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, if (hw->mac_type > e1000_82543) { /* Set up Op-code, Phy Address, and register address in the MDI - * Control register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. */ if (hw->mac_type == e1000_ce4100) { mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | @@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, *phy_data = (u16) mdic; } } else { - /* We must first send a preamble through the MDIO pin to signal the - * beginning of an MII instruction. This is done by sending 32 - * consecutive "1" bits. + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. */ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); /* Now combine the next few fields that are required for a read * operation. 
We use this method instead of calling the - * e1000_shift_out_mdi_bits routine five different times. The format of - * a MII read instruction consists of a shift out of 14 bits and is - * defined as follows: + * e1000_shift_out_mdi_bits routine five different times. The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> - * followed by a shift in of 18 bits. This first two bits shifted in - * are TurnAround bits used to avoid contention on the MDIO pin when a - * READ operation is performed. These two bits are thrown away - * followed by a shift in of 16 bits which contains the desired data. + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. */ mdic = ((reg_addr) | (phy_addr << 5) | (PHY_OP_READ << 10) | (PHY_SOF << 12)); e1000_shift_out_mdi_bits(hw, mdic, 14); - /* Now that we've shifted out the read command to the MII, we need to - * "shift in" the 16-bit value (18 total bits) of the requested PHY - * register address. + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. */ *phy_data = e1000_shift_in_mdi_bits(hw); } @@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, } } } else { - /* We'll need to use the SW defined pins to shift the write command - * out to the PHY. We first send a preamble to the PHY to signal the - * beginning of the MII instruction. This is done by sending 32 - * consecutive "1" bits. + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. */ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); - /* Now combine the remaining required fields that will indicate a - * write operation. We use this method instead of calling the - * e1000_shift_out_mdi_bits routine for each field in the command. The - * format of a MII write instruction is as follows: - * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>. */ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); @@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) e_dbg("Resetting Phy...\n"); if (hw->mac_type > e1000_82543) { - /* Read the device control register and assert the E1000_CTRL_PHY_RST - * bit. Then, take it out of reset. + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. * For e1000 hardware, we delay for 10ms between the assert - * and deassert. + * and de-assert. 
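As a standalone illustration (not part of the patch) of the 14-bit read command packing described above, with sketch constants mirroring the standard MII start-of-frame and read opcode:

#define SKETCH_MII_SOF		0x1	/* start of frame: 01b */
#define SKETCH_MII_OP_READ	0x2	/* read opcode:    10b */

static unsigned int mii_read_cmd(unsigned int phy_addr,
				 unsigned int reg_addr)
{
	return (reg_addr & 0x1f) |		/* bits  4:0  register */
	       ((phy_addr & 0x1f) << 5) |	/* bits  9:5  PHY addr */
	       (SKETCH_MII_OP_READ << 10) |	/* bits 11:10 opcode   */
	       (SKETCH_MII_SOF << 12);		/* bits 13:12 SOF      */
}

This is the value the hunk shifts out over MDIO after the 32-bit preamble; it is then followed by 18 input clocks, of which the first two (turnaround) bits are discarded and the remaining 16 are the register data.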
*/ ctrl = er32(CTRL); ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); @@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) E1000_WRITE_FLUSH(); } else { - /* Read the Extended Device Control Register, assert the PHY_RESET_DIR - * bit to put the PHY into reset. Then, take it out of reset. + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. */ ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; @@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, e_dbg("e1000_phy_igp_get_info"); /* The downshift status is checked only once, after link is established, - * and it stored in the hw->speed_downgraded parameter. */ + * and it stored in the hw->speed_downgraded parameter. + */ phy_info->downshift = (e1000_downshift) hw->speed_downgraded; /* IGP01E1000 does not need to support it. */ @@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - /* Local/Remote Receiver Information are only valid at 1000 Mbps */ + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) return ret_val; @@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, e_dbg("e1000_phy_m88_get_info"); /* The downshift status is checked only once, after link is established, - * and it stored in the hw->speed_downgraded parameter. */ + * and it stored in the hw->speed_downgraded parameter. + */ phy_info->downshift = (e1000_downshift) hw->speed_downgraded; ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); @@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) } if (eeprom->type == e1000_eeprom_spi) { - /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to - * 32KB (incremented by powers of 2). + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). */ /* Set to default value for initial eeprom read. */ eeprom->word_size = 64; @@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; /* 256B eeprom size was not supported in earlier hardware, so we - * bump eeprom_size up one to ensure that "1" (which maps to 256B) - * is never the result used in the shifting logic below. */ + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ if (eeprom_size) eeprom_size++; @@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) */ static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) { - /* Lower the clock input to the EEPROM (by clearing the SK bit), and then - * wait 50 microseconds. + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. */ *eecd = *eecd & ~E1000_EECD_SK; ew32(EECD, *eecd); @@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) eecd |= E1000_EECD_DO; } do { - /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", - * and then raising and then lowering the clock (the SK bit controls - * the clock input to the EEPROM). A "0" is shifted out to the EEPROM - * by setting "DI" to "0" and then raising and then lowering the clock. 
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. */ eecd &= ~E1000_EECD_DI; @@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) /* In order to read a register from the EEPROM, we need to shift 'count' * bits in from the EEPROM. Bits are "shifted in" by raising the clock - * input to the EEPROM (setting the SK bit), and then reading the value of - * the "DO" bit. During this "shifting in" process the "DI" bit should - * always be clear. + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. */ eecd = er32(EECD); @@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (eeprom->word_size == 0) e1000_init_eeprom_params(hw); - /* A check for invalid values: offset too large, too many words, and not - * enough words. + /* A check for invalid values: offset too large, too many words, and + * not enough words. */ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || (words == 0)) { @@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, return -E1000_ERR_EEPROM; /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have - * acquired the EEPROM at this point, so any returns should release it */ + * acquired the EEPROM at this point, so any returns should release it + */ if (eeprom->type == e1000_eeprom_spi) { u16 word_in; u8 read_opcode = EEPROM_READ_OPCODE_SPI; @@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e1000_standby_eeprom(hw); - /* Some SPI eeproms use the 8th address bit embedded in the opcode */ + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ if ((eeprom->address_bits == 8) && (offset >= 128)) read_opcode |= EEPROM_A8_OPCODE_SPI; @@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e1000_shift_out_ee_bits(hw, (u16) (offset * 2), eeprom->address_bits); - /* Read the data. The address of the eeprom internally increments with - * each byte (spi) being read, saving on the overhead of eeprom setup - * and tear-down. The address counter will roll over if reading beyond - * the size of the eeprom, thus allowing the entire memory to be read - * starting from any offset. */ + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. + */ for (i = 0; i < words; i++) { word_in = e1000_shift_in_ee_bits(hw, 16); data[i] = (word_in >> 8) | (word_in << 8); @@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e1000_shift_out_ee_bits(hw, (u16) (offset + i), eeprom->address_bits); - /* Read the data. For microwire, each word requires the overhead - * of eeprom setup and tear-down. */ + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. 
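A compact sketch (not part of the patch) of the SPI read sequence the comments above walk through; shift_out_bits()/shift_in_bits() are hypothetical stand-ins for the driver's bit-bang helpers, and the opcode values mirror the usual SPI EEPROM READ/A8 encoding:

static void spi_eeprom_read(u16 offset, u16 words, u16 *data,
			    u16 address_bits)
{
	u8 opcode = 0x03;			/* SPI READ */
	u16 i;

	if (address_bits == 8 && offset >= 128)
		opcode |= 0x08;			/* A8 carried in the opcode */

	shift_out_bits(opcode, 8);
	shift_out_bits((u16)(offset * 2), address_bits);   /* byte address */

	/* The part auto-increments its internal address, so one setup
	 * covers the whole run of words; each word arrives byte-swapped
	 * and is flipped to host order.
	 */
	for (i = 0; i < words; i++) {
		u16 w = shift_in_bits(16);

		data[i] = (w >> 8) | (w << 8);
	}
}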
+ */ data[i] = e1000_shift_in_ee_bits(hw, 16); e1000_standby_eeprom(hw); } @@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, if (eeprom->word_size == 0) e1000_init_eeprom_params(hw); - /* A check for invalid values: offset too large, too many words, and not - * enough words. + /* A check for invalid values: offset too large, too many words, and + * not enough words. */ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || (words == 0)) { @@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, e1000_standby_eeprom(hw); - /* Some SPI eeproms use the 8th address bit embedded in the opcode */ + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ if ((eeprom->address_bits == 8) && (offset >= 128)) write_opcode |= EEPROM_A8_OPCODE_SPI; @@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, /* Send the data */ - /* Loop to allow for up to whole page write (32 bytes) of eeprom */ + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ while (widx < words) { u16 word_out = data[widx]; word_out = (word_out >> 8) | (word_out << 8); e1000_shift_out_ee_bits(hw, word_out, 16); widx++; - /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE - * operation, while the smaller eeproms are capable of an 8-byte - * PAGE WRITE operation. Break the inner loop to pass new address + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address */ if ((((offset + widx) * 2) % eeprom->page_size) == 0) { e1000_standby_eeprom(hw); @@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, /* Send the data */ e1000_shift_out_ee_bits(hw, data[words_written], 16); - /* Toggle the CS line. This in effect tells the EEPROM to execute - * the previous command. + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. */ e1000_standby_eeprom(hw); - /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will - * signal that the command has been completed by raising the DO signal. - * If DO does not go high in 10 milliseconds, then error out. + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. */ for (i = 0; i < 200; i++) { eecd = er32(EECD); @@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw) for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { /* If the offset we want to clear is the same offset of the * manageability VLAN ID, then clear all bits except that of the - * manageability unit */ + * manageability unit + */ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); E1000_WRITE_FLUSH(); @@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, * counters overcount this packet as a CRC error and undercount * the packet as a good packet */ - /* This packet should not be counted as a CRC error. */ + /* This packet should not be counted as a CRC error. */ stats->crcerrs--; - /* This packet does count as a Good Packet Received. */ + /* This packet does count as a Good Packet Received. 
*/ stats->gprc++; - /* Adjust the Good Octets received counters */ + /* Adjust the Good Octets received counters */ carry_bit = 0x80000000 & stats->gorcl; stats->gorcl += frame_len; /* If the high bit of Gorcl (the low 32 bits of the Good Octets @@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, if (ret_val) return ret_val; - /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to - * find the polarity status */ + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { @@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity_reversed : e1000_rev_polarity_normal; } else { - /* For 10 Mbps, read the polarity bit in the status register. (for - * 100 Mbps this bit is always 0) */ + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? e1000_rev_polarity_reversed : @@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) } } else { if (hw->dsp_config_state == e1000_dsp_config_activated) { - /* Save off the current value of register 0x2F5B to be restored at - * the end of the routines. */ + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); @@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { @@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) } ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) } if (hw->ffe_config_state == e1000_ffe_config_active) { - /* Save off the current value of register 0x2F5B to be restored at - * the end of the routines. */ + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); @@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) msleep(20); ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); + IGP01E1000_IEEE_FORCE_GIGA); if (ret_val) return ret_val; ret_val = @@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) return ret_val; ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); + IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) return ret_val; @@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) return E1000_SUCCESS; /* During driver activity LPLU should not be used or it will attain link - * from the lowest speeds starting from 10Mbps. The capability is used for - * Dx transitions and states */ + * from the lowest speeds starting from 10Mbps. 
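The gorcl/gorch adjustment above maintains a 64-bit octet count in two 32-bit halves; a minimal sketch (not part of the patch) of that carry propagation, valid for increments below 2^31 such as a frame length:

static void add_to_split_counter(u32 *lo, u32 *hi, u32 len)
{
	u32 high_bit = *lo & 0x80000000;

	*lo += len;
	/* If bit 31 was set before the add and is clear afterwards,
	 * the low half wrapped and the high half must be bumped.
	 */
	if (high_bit && !(*lo & 0x80000000))
		(*hi)++;
}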
The capability is used + * for Dx transitions and states + */ if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { ret_val = @@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) return ret_val; } - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during - * Dx states where the power conservation is most important. During - * driver activity we should enable SmartSpeed, so performance is - * maintained. */ + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ if (hw->smart_speed == e1000_smart_speed_on) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 294da56b824c..8502c625dbef 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) * e1000_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ - static int __init e1000_init_module(void) { int ret; @@ -266,7 +265,6 @@ module_init(e1000_init_module); * e1000_exit_module is called just before the driver is removed * from memory. **/ - static void __exit e1000_exit_module(void) { pci_unregister_driver(&e1000_driver); @@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter) * e1000_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ - static void e1000_irq_disable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) * e1000_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ - static void e1000_irq_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000_configure_rx(adapter); /* call E1000_DESC_UNUSED which always leaves * at least 1 descriptor unused to make sure - * next_to_use != next_to_clean */ + * next_to_use != next_to_clean + */ for (i = 0; i < adapter->num_rx_queues; i++) { struct e1000_rx_ring *ring = &adapter->rx_ring[i]; adapter->alloc_rx_buf(adapter, ring, - E1000_DESC_UNUSED(ring)); + E1000_DESC_UNUSED(ring)); } } @@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter) * The phy may be powered down to save power and turn off link when the * driver is unloaded and wake on lan is not enabled (among others) * *** this routine MUST be followed by a call to e1000_reset *** - * **/ - void e1000_power_up_phy(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter) /* Just clear the power down bit to wake the phy back up */ if (hw->media_type == e1000_media_type_copper) { /* according to the manual, the phy will retain its - * settings across a power-down/up cycle */ + * settings across a power-down/up cycle + */ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); mii_reg &= ~MII_CR_POWER_DOWN; e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); @@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) * The PHY cannot be powered down if any of the following is true 
* * (a) WoL is enabled * (b) AMT is active - * (c) SoL/IDER session is active */ + * (c) SoL/IDER session is active + */ if (!adapter->wol && hw->mac_type >= e1000_82540 && hw->media_type == e1000_media_type_copper) { u16 mii_reg = 0; @@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter) e1000_irq_disable(adapter); - /* - * Setting DOWN must be after irq_disable to prevent + /* Setting DOWN must be after irq_disable to prevent * a screaming interrupt. Setting DOWN also prevents * tasks from rescheduling. */ @@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter) * rounded up to the next 1KB and expressed in KB. Likewise, * the Rx FIFO should be large enough to accommodate at least * one full receive packet and is similarly rounded up and - * expressed in KB. */ + * expressed in KB. + */ pba = er32(PBA); /* upper 16 bits has Tx packet buffer allocation size in KB */ tx_space = pba >> 16; /* lower 16 bits has Rx packet buffer allocation size in KB */ pba &= 0xffff; - /* - * the tx fifo also stores 16 bytes of information about the tx + /* the Tx fifo also stores 16 bytes of information about the Tx * but don't include ethernet FCS because hardware appends it */ min_tx_space = (hw->max_frame_size + @@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter) /* If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO - * allocation, take space away from current Rx allocation */ + * allocation, take space away from current Rx allocation + */ if (tx_space < min_tx_space && ((min_tx_space - tx_space) < pba)) { pba = pba - (min_tx_space - tx_space); @@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter) break; } - /* if short on rx space, rx wins and must trump tx - * adjustment or use Early Receive if available */ + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ if (pba < min_rx_space) pba = min_rx_space; } @@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter) ew32(PBA, pba); - /* - * flow control settings: + /* flow control settings: * The high water mark must be low enough to fit one full frame * (or the size used for early receive) above it in the Rx FIFO. * Set it to the lower of: @@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter) u32 ctrl = er32(CTRL); /* clear phy power management bit if we are in gig only mode, * which if enabled will attempt negotiation to 100Mb, which - * can cause a loss of link at power off or driver unload */ + * can cause a loss of link at power off or driver unload + */ ctrl &= ~E1000_CTRL_SWDPIN3; ew32(CTRL, ctrl); } @@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev) static netdev_features_t e1000_fix_features(struct net_device *netdev, netdev_features_t features) { - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. */ if (features & NETIF_F_HW_VLAN_RX) features |= NETIF_F_HW_VLAN_TX; @@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; - /* - * there is a workaround being applied below that limits + /* there is a workaround being applied below that limits * 64-bit DMA addresses to 64-bit hardware. 
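A sketch (not part of the patch) of the packet-buffer rebalancing performed in e1000_reset() above, with the minimum Tx/Rx FIFO sizes passed in already rounded up to whole kilobytes; the names are hypothetical:

static u16 rebalance_pba(u32 pba_reg, u32 min_tx_kb, u32 min_rx_kb)
{
	u32 tx_space = pba_reg >> 16;	/* current Tx allocation, KB */
	u32 pba = pba_reg & 0xffff;	/* current Rx allocation, KB */

	/* Give Tx what it is short of, taken from Rx, but only if Rx
	 * can spare it.
	 */
	if (tx_space < min_tx_kb && (min_tx_kb - tx_space) < pba)
		pba -= min_tx_kb - tx_space;

	/* If that leaves Rx short, Rx wins and trumps the Tx
	 * adjustment.
	 */
	if (pba < min_rx_kb)
		pba = min_rx_kb;

	return (u16)pba;
}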
There are some * 32-bit adapters that Tx hang when given 64-bit DMA addresses */ pci_using_dac = 0; if ((hw->bus_type == e1000_bus_type_pcix) && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - /* - * according to DMA-API-HOWTO, coherent calls will always + /* according to DMA-API-HOWTO, coherent calls will always * succeed if the set call did */ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); @@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* before reading the EEPROM, reset the controller to - * put the device in a known good starting state */ + * put the device in a known good starting state + */ e1000_reset_hw(hw); @@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (e1000_validate_eeprom_checksum(hw) < 0) { e_err(probe, "The EEPROM Checksum Is Not Valid\n"); e1000_dump_eeprom(adapter); - /* - * set MAC address to all zeroes to invalidate and temporary + /* set MAC address to all zeroes to invalidate and temporary * disable this device for the user. This blocks regular * traffic while still permitting ethtool ioctls from reaching * the hardware as well as allowing the user to run the @@ -1123,9 +1118,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* don't block initalization here due to bad MAC address */ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) + if (!is_valid_ether_addr(netdev->dev_addr)) e_err(probe, "Invalid MAC Address\n"); @@ -1170,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* now that we have the eeprom settings, apply the special cases * where the eeprom may be wrong or the board simply won't support - * wake on lan on a particular port */ + * wake on lan on a particular port + */ switch (pdev->device) { case E1000_DEV_ID_82546GB_PCIE: adapter->eeprom_wol = 0; @@ -1178,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546GB_FIBER: /* Wake events only supported on port A for dual fiber - * regardless of eeprom setting */ + * regardless of eeprom setting + */ if (er32(STATUS) & E1000_STATUS_FUNC_1) adapter->eeprom_wol = 0; break; @@ -1271,7 +1267,6 @@ err_pci_reg: * Hot-Plug event, or because the driver is going to be removed from * memory. **/ - static void e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -1307,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev) * e1000_sw_init initializes the Adapter private data structure. * e1000_init_hw_struct MUST be called before this function **/ - static int e1000_sw_init(struct e1000_adapter *adapter) { adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; @@ -1338,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter) * We allocate one ring per queue at run-time since we don't know the * number of queues at compile-time. **/ - static int e1000_alloc_queues(struct e1000_adapter *adapter) { adapter->tx_ring = kcalloc(adapter->num_tx_queues, @@ -1368,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter) * handler is registered with the OS, the watchdog task is started, * and the stack is notified that the interface is ready. 
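The probe-time DMA comment above boils down to a try-64-bit-then-fall-back-to-32-bit pattern, limited to PCI-X parts; a sketch (not part of the patch), with is_pcix standing in for the bus-type check:

static int set_dma_masks(struct pci_dev *pdev, bool is_pcix,
			 bool *using_dac)
{
	int err;

	*using_dac = false;

	if (is_pcix && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* Per DMA-API-HOWTO, the coherent mask cannot fail
		 * once the streaming mask has been accepted.
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		*using_dac = true;
		return 0;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}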
**/ - static int e1000_open(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1402,7 +1394,8 @@ static int e1000_open(struct net_device *netdev) /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our - * clean_rx handler before we do so. */ + * clean_rx handler before we do so. + */ e1000_configure(adapter); err = e1000_request_irq(adapter); @@ -1445,7 +1438,6 @@ err_setup_tx: * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ - static int e1000_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1460,10 +1452,11 @@ static int e1000_close(struct net_device *netdev) e1000_free_all_rx_resources(adapter); /* kill manageability vlan ID if supported, but not if a vlan with - * the same ID is registered on the host OS (let 8021q kill it) */ + * the same ID is registered on the host OS (let 8021q kill it) + */ if ((hw->mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && - !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); } @@ -1484,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, unsigned long end = begin + len; /* First rev 82545 and 82546 need to not allow any memory - * write location to cross 64k boundary due to errata 23 */ + * write location to cross 64k boundary due to errata 23 + */ if (hw->mac_type == e1000_82545 || hw->mac_type == e1000_ce4100 || hw->mac_type == e1000_82546) { @@ -1501,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, * * Return 0 on success, negative on failure **/ - static int e1000_setup_tx_resources(struct e1000_adapter *adapter, struct e1000_tx_ring *txdr) { @@ -1510,11 +1503,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, size = sizeof(struct e1000_buffer) * txdr->count; txdr->buffer_info = vzalloc(size); - if (!txdr->buffer_info) { - e_err(probe, "Unable to allocate memory for the Tx descriptor " - "ring\n"); + if (!txdr->buffer_info) return -ENOMEM; - } /* round up to nearest 4K */ @@ -1578,7 +1568,6 @@ setup_tx_desc_die: * * Return 0 on success, negative on failure **/ - int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) { int i, err = 0; @@ -1603,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) * * Configure the Tx unit of the MAC after a reset. **/ - static void e1000_configure_tx(struct e1000_adapter *adapter) { u64 tdba; @@ -1624,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); ew32(TDT, 0); ew32(TDH, 0); - adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); - adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_TDT : E1000_82542_TDT); break; } @@ -1680,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) adapter->txd_cmd |= E1000_TXD_CMD_RS; /* Cache if we're 82544 running in PCI-X because we'll - * need this to apply a workaround later in the send path. */ + * need this to apply a workaround later in the send path. + */ if (hw->mac_type == e1000_82544 && hw->bus_type == e1000_bus_type_pcix) adapter->pcix_82544 = true; @@ -1696,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * * Returns 0 on success, negative on failure **/ - static int e1000_setup_rx_resources(struct e1000_adapter *adapter, struct e1000_rx_ring *rxdr) { @@ -1705,11 +1695,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, size = sizeof(struct e1000_buffer) * rxdr->count; rxdr->buffer_info = vzalloc(size); - if (!rxdr->buffer_info) { - e_err(probe, "Unable to allocate memory for the Rx descriptor " - "ring\n"); + if (!rxdr->buffer_info) return -ENOMEM; - } desc_len = sizeof(struct e1000_rx_desc); @@ -1778,7 +1765,6 @@ setup_rx_desc_die: * * Return 0 on success, negative on failure **/ - int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) { int i, err = 0; @@ -1847,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* This is useful for sniffing bad packets. */ if (adapter->netdev->features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic - * in e1000e_set_rx_mode */ + * in e1000e_set_rx_mode + */ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ @@ -1869,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) * * Configure the Rx unit of the MAC after a reset. **/ - static void e1000_configure_rx(struct e1000_adapter *adapter) { u64 rdba; @@ -1902,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) } /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring */ + * the Base and Length of the Rx Descriptor Ring + */ switch (adapter->num_rx_queues) { case 1: default: @@ -1912,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); ew32(RDT, 0); ew32(RDH, 0); - adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); - adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); break; } @@ -1939,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * * Free all transmit software resources **/ - static void e1000_free_tx_resources(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring) { @@ -1962,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter, * * Free all transmit software resources **/ - void e1000_free_all_tx_resources(struct e1000_adapter *adapter) { int i; @@ -1997,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, * @adapter: board private structure * @tx_ring: ring to be cleaned **/ - static void e1000_clean_tx_ring(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring) { @@ -2033,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, * e1000_clean_all_tx_rings - Free Tx Buffers for all queues * @adapter: board private structure **/ - static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) { int i; @@ -2049,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) * * Free all receive software resources **/ - static void e1000_free_rx_resources(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring) { @@ -2072,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter, * * Free all receive software resources **/ - void e1000_free_all_rx_resources(struct e1000_adapter *adapter) { int i; @@ -2086,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter) * @adapter: board private structure * @rx_ring: ring to free buffers from **/ - static void e1000_clean_rx_ring(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring) { @@ -2145,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, * e1000_clean_all_rx_rings - Free Rx Buffers for all queues * @adapter: board private structure **/ - static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) { int i; @@ -2205,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter) * * Returns 0 on success, negative on failure **/ - static int e1000_set_mac(struct net_device *netdev, void *p) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -2240,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p) * responsible for configuring the hardware for proper unicast, multicast, * promiscuous mode, and all-multi behavior. 
**/ - static void e1000_set_rx_mode(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -2253,10 +2232,8 @@ static void e1000_set_rx_mode(struct net_device *netdev) int mta_reg_count = E1000_NUM_MTA_REGISTERS; u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); - if (!mcarray) { - e_err(probe, "memory allocation failed\n"); + if (!mcarray) return; - } /* Check for Promiscuous and All Multicast modes */ @@ -2326,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev) } /* write the hash table completely, write from bottom to avoid - * both stupid write combining chipsets, and flushing each write */ + * both stupid write combining chipsets, and flushing each write + */ for (i = mta_reg_count - 1; i >= 0 ; i--) { - /* - * If we are on an 82544 has an errata where writing odd + /* If we are on an 82544 has an errata where writing odd * offsets overwrites the previous even offset, but writing * backwards over the range solves the issue by always * writing the odd offset first @@ -2467,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work) bool txb2b = true; /* update snapshot of PHY registers on LSC */ e1000_get_speed_and_duplex(hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); ctrl = er32(CTRL); pr_info("%s NIC Link is Up %d Mbps %s, " @@ -2542,7 +2519,8 @@ link_up: /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). */ + * (Do the reset outside of interrupt context). + */ adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); /* exit immediately since reset is imminent */ @@ -2552,8 +2530,7 @@ link_up: /* Simple mode for Interrupt Throttle Rate (ITR) */ if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { - /* - * Symmetric Tx/Rx gets a reduced ITR=2000; + /* Symmetric Tx/Rx gets a reduced ITR=2000; * Total asymmetrical Tx or Rx gets ITR=8000; * everyone else is between 2000-8000. */ @@ -2668,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter) goto set_itr_now; } - adapter->tx_itr = e1000_update_itr(adapter, - adapter->tx_itr, - adapter->total_tx_packets, - adapter->total_tx_bytes); + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) adapter->tx_itr = low_latency; - adapter->rx_itr = e1000_update_itr(adapter, - adapter->rx_itr, - adapter->total_rx_packets, - adapter->total_rx_bytes); + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) adapter->rx_itr = low_latency; @@ -2705,10 +2680,11 @@ set_itr_now: if (new_itr != adapter->itr) { /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is - * increasing */ + * increasing + */ new_itr = new_itr > adapter->itr ? 
- min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; + min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; adapter->itr = new_itr; ew32(ITR, 1000000000 / (new_itr * 256)); } @@ -2870,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, /* Workaround for Controller erratum -- * descriptor for non-tso packet in a linear SKB that follows a * tso gets written back prematurely before the data is fully - * DMA'd to the controller */ + * DMA'd to the controller + */ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) { tx_ring->last_tx_tso = false; @@ -2878,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, } /* Workaround for premature desc write-backs - * in TSO mode. Append 4-byte sentinel desc */ + * in TSO mode. Append 4-byte sentinel desc + */ if (unlikely(mss && !nr_frags && size == len && size > 8)) size -= 4; /* work-around for errata 10 and it applies @@ -2891,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, size = 2015; /* Workaround for potential 82544 hang in PCI-X. Avoid - * terminating buffers within evenly-aligned dwords. */ + * terminating buffers within evenly-aligned dwords. + */ if (unlikely(adapter->pcix_82544 && !((unsigned long)(skb->data + offset + size - 1) & 4) && size > 4)) @@ -2903,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, buffer_info->mapped_as_page = false; buffer_info->dma = dma_map_single(&pdev->dev, skb->data + offset, - size, DMA_TO_DEVICE); + size, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; buffer_info->next_to_watch = i; @@ -2934,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter, buffer_info = &tx_ring->buffer_info[i]; size = min(len, max_per_txd); /* Workaround for premature desc write-backs - * in TSO mode. Append 4-byte sentinel desc */ - if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && f == (nr_frags-1) && + size == len && size > 8)) size -= 4; /* Workaround for potential 82544 hang in PCI-X. * Avoid terminating buffers within evenly-aligned - * dwords. */ + * dwords. + */ bufend = (unsigned long) page_to_phys(skb_frag_page(frag)); bufend += offset + size - 1; @@ -3003,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | - E1000_TXD_CMD_TSE; + E1000_TXD_CMD_TSE; txd_upper |= E1000_TXD_POPTS_TXSM << 8; if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) @@ -3044,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); tx_ring->next_to_use = i; writel(i, hw->hw_addr + tx_ring->tdt); /* we need this if more than one processor can write to our tail - * at a time, it syncronizes IO on IA64/Altix systems */ + * at a time, it synchronizes IO on IA64/Altix systems + */ mmiowb(); } @@ -3099,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) netif_stop_queue(netdev); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ + * but since that doesn't exist yet, just open code it. + */ smp_mb(); /* We need to check again in a case another CPU has just - * made room available. 
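The open-coded barrier discussed above closes a race between the transmit path stopping the queue and a completion on another CPU freeing descriptors; a sketch (not part of the patch), with desc_unused() standing in for E1000_DESC_UNUSED():

static int maybe_stop_queue(struct net_device *netdev,
			    struct e1000_tx_ring *ring, int needed)
{
	if (desc_unused(ring) >= needed)
		return 0;

	netif_stop_queue(netdev);
	/* Full barrier: the cleanup path must observe the stopped
	 * queue, or we must observe the space it just freed.
	 */
	smp_mb();

	if (desc_unused(ring) < needed)
		return -EBUSY;		/* still full, stay stopped */

	netif_start_queue(netdev);	/* lost the race, resume */
	return 0;
}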
*/ + * made room available. + */ if (likely(E1000_DESC_UNUSED(tx_ring) < size)) return -EBUSY; @@ -3114,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) } static int e1000_maybe_stop_tx(struct net_device *netdev, - struct e1000_tx_ring *tx_ring, int size) + struct e1000_tx_ring *tx_ring, int size) { if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) return 0; @@ -3138,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, int tso; unsigned int f; - /* This goes back to the question of how to logically map a tx queue + /* This goes back to the question of how to logically map a Tx queue * to a flow. Right now, performance is impacted slightly negatively - * if using multiple tx queues. If the stack breaks away from a - * single qdisc implementation, we can look at this again. */ + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ tx_ring = adapter->tx_ring; if (unlikely(skb->len <= 0)) { @@ -3166,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, * initiating the DMA for each buffer. The calc is: * 4 = ceil(buffer len/mss). To make sure we don't * overrun the FIFO, adjust the max buffer len if mss - * drops. */ + * drops. + */ if (mss) { u8 hdr_len; max_per_txd = min(mss << 2, max_per_txd); @@ -3182,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, * this hardware's requirements * NOTE: this is a TSO only workaround * if end byte alignment not correct move us - * into the next dword */ - if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) break; /* fall through */ pull_size = min((unsigned int)4, skb->data_len); @@ -3231,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, count += nr_frags; /* need: count + 2 desc gap to keep tail from touching - * head, otherwise try next time */ + * head, otherwise try next time + */ if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) return NETDEV_TX_BUSY; @@ -3270,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tx_flags |= E1000_TX_FLAGS_NO_FCS; count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, - nr_frags, mss); + nr_frags, mss); if (count) { netdev_sent_queue(netdev, skb->len); @@ -3372,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter) /* Print Registers */ e1000_regdump(adapter); - /* - * transmit dump - */ + /* transmit dump */ pr_info("TX Desc ring0 dump\n"); /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) @@ -3435,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter) } rx_ring_summary: - /* - * receive dump - */ + /* receive dump */ pr_info("\nRX Desc ring dump\n"); /* Legacy Receive Descriptor Format @@ -3502,7 +3489,6 @@ exit: * e1000_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ - static void e1000_tx_timeout(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -3530,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work) * Returns the address of the device statistics structure. * The statistics are actually updated from the watchdog. 
**/ - static struct net_device_stats *e1000_get_stats(struct net_device *netdev) { /* only return the current stats */ @@ -3544,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev) * * Returns 0 on success, negative on failure **/ - static int e1000_change_mtu(struct net_device *netdev, int new_mtu) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -3581,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. RXBUFFER_2048 --> size-4096 slab - * however with the new *_jumbo_rx* routines, jumbo receives will use - * fragmented skbs */ + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ if (max_frame <= E1000_RXBUFFER_2048) adapter->rx_buffer_len = E1000_RXBUFFER_2048; @@ -3617,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) * e1000_update_stats - Update the board statistics counters * @adapter: board private structure **/ - void e1000_update_stats(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3628,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF - /* - * Prevent stats update while adapter is being reset, or if the pci + /* Prevent stats update while adapter is being reset, or if the pci * connection is down. */ if (adapter->link_speed == 0) @@ -3719,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter) /* Rx Errors */ /* RLEC on some newer hardware can be incorrect so build - * our own version based on RUC and ROC */ + * our own version based on RUC and ROC + */ netdev->stats.rx_errors = adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + @@ -3773,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter) * @irq: interrupt number * @data: pointer to a network interface device structure **/ - static irqreturn_t e1000_intr(int irq, void *data) { struct net_device *netdev = data; @@ -3784,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data) if (unlikely((!icr))) return IRQ_NONE; /* Not our interrupt */ - /* - * we might have caused the interrupt, but the above + /* we might have caused the interrupt, but the above * read cleared it, and just in case the driver is * down there is nothing to do so return handled */ @@ -3811,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data) __napi_schedule(&adapter->napi); } else { /* this really should not happen! 
if it does it is basically a - * bug, but not a hard error, so enable ints and continue */ + * bug, but not a hard error, so enable ints and continue + */ if (!test_bit(__E1000_DOWN, &adapter->flags)) e1000_irq_enable(adapter); } @@ -3825,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data) **/ static int e1000_clean(struct napi_struct *napi, int budget) { - struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); int tx_clean_complete = 0, work_done = 0; tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); @@ -3916,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, if (adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of i */ + * check with the clearing of time_stamp and movement of i + */ adapter->detect_tx_hung = false; if (tx_ring->buffer_info[eop].time_stamp && time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + - (adapter->tx_timeout_factor * HZ)) && + (adapter->tx_timeout_factor * HZ)) && !(er32(STATUS) & E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ @@ -3963,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, * @csum: receive descriptor csum field * @sk_buff: socket buffer with received data **/ - static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, u32 csum, struct sk_buff *skb) { @@ -3999,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, * e1000_consume_page - helper function **/ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, - u16 length) + u16 length) { bi->page = NULL; skb->len += length; @@ -4095,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (TBI_ACCEPT(hw, status, rx_desc->errors, length, last_byte)) { spin_lock_irqsave(&adapter->stats_lock, - irq_flags); + irq_flags); e1000_tbi_adjust_stats(hw, &adapter->stats, length, mapped); spin_unlock_irqrestore(&adapter->stats_lock, - irq_flags); + irq_flags); length--; } else { if (netdev->features & NETIF_F_RXALL) @@ -4107,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* recycle both page and skb */ buffer_info->skb = skb; /* an error means any chain goes out the window - * too */ + * too + */ if (rx_ring->rx_skb_top) dev_kfree_skb(rx_ring->rx_skb_top); rx_ring->rx_skb_top = NULL; @@ -4123,7 +4108,7 @@ process_skb: /* this is the beginning of a chain */ rxtop = skb; skb_fill_page_desc(rxtop, 0, buffer_info->page, - 0, length); + 0, length); } else { /* this is the middle of a chain */ skb_fill_page_desc(rxtop, @@ -4141,38 +4126,42 @@ process_skb: skb_shinfo(rxtop)->nr_frags, buffer_info->page, 0, length); /* re-use the current skb, we only consumed the - * page */ + * page + */ buffer_info->skb = skb; skb = rxtop; rxtop = NULL; e1000_consume_page(buffer_info, skb, length); } else { /* no chain, got EOP, this buf is the packet - * copybreak to save the put_page/alloc_page */ + * copybreak to save the put_page/alloc_page + */ if (length <= copybreak && skb_tailroom(skb) >= length) { u8 *vaddr; vaddr = kmap_atomic(buffer_info->page); - memcpy(skb_tail_pointer(skb), vaddr, length); + memcpy(skb_tail_pointer(skb), vaddr, + length); kunmap_atomic(vaddr); /* re-use the page, so don't erase - * buffer_info->page */ + * buffer_info->page + */ skb_put(skb, length); } else { 
skb_fill_page_desc(skb, 0, - buffer_info->page, 0, - length); + buffer_info->page, 0, + length); e1000_consume_page(buffer_info, skb, - length); + length); } } } /* Receive Checksum Offload XXX recompute due to CRC strip? */ e1000_rx_checksum(adapter, - (u32)(status) | - ((u32)(rx_desc->errors) << 24), - le16_to_cpu(rx_desc->csum), skb); + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); total_rx_bytes += (skb->len - 4); /* don't count FCS */ if (likely(!(netdev->features & NETIF_F_RXFCS))) @@ -4214,8 +4203,7 @@ next_desc: return cleaned; } -/* - * this should improve performance for small packets with large amounts +/* this should improve performance for small packets with large amounts * of reassembly being done in the stack */ static void e1000_check_copybreak(struct net_device *netdev, @@ -4319,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, last_byte)) { spin_lock_irqsave(&adapter->stats_lock, flags); e1000_tbi_adjust_stats(hw, &adapter->stats, - length, skb->data); + length, skb->data); spin_unlock_irqrestore(&adapter->stats_lock, - flags); + flags); length--; } else { if (netdev->features & NETIF_F_RXALL) @@ -4386,10 +4374,9 @@ next_desc: * @rx_ring: pointer to receive ring structure * @cleaned_count: number of buffers to allocate this pass **/ - static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, int cleaned_count) + struct e1000_rx_ring *rx_ring, int cleaned_count) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; @@ -4430,7 +4417,7 @@ check_page: if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, + buffer_info->page, 0, buffer_info->length, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { @@ -4460,7 +4447,8 @@ check_page: /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); writel(i, adapter->hw.hw_addr + rx_ring->rdt); } @@ -4470,7 +4458,6 @@ check_page: * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended * @adapter: address of board private structure **/ - static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count) @@ -4541,8 +4528,7 @@ map_skb: break; /* while !buffer_info->skb */ } - /* - * XXX if it was allocated cleanly it will never map to a + /* XXX if it was allocated cleanly it will never map to a * boundary crossing */ @@ -4580,7 +4566,8 @@ map_skb: /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); writel(i, hw->hw_addr + rx_ring->rdt); } @@ -4590,7 +4577,6 @@ map_skb: * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
* @adapter: **/ - static void e1000_smartspeed(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -4603,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) if (adapter->smartspeed == 0) { /* If Master/Slave config fault is asserted twice, - * we assume back-to-back */ + * we assume back-to-back + */ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); @@ -4616,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) adapter->smartspeed++; if (!e1000_phy_setup_autoneg(hw) && !e1000_read_phy_reg(hw, PHY_CTRL, - &phy_ctrl)) { + &phy_ctrl)) { phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); e1000_write_phy_reg(hw, PHY_CTRL, @@ -4647,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) * @ifreq: * @cmd: **/ - static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -4666,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) * @ifreq: * @cmd: **/ - static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { @@ -4928,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) hw->autoneg = 0; /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work */ + * for the switch() below to work + */ if ((spd & 1) || (dplx & ~1)) goto err_inval; @@ -5131,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs +/* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c index 750fc0194f37..c9cde352b1c8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_param.c +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c @@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter); * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. **/ - void e1000_check_options(struct e1000_adapter *adapter) { struct e1000_option opt; @@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter) .def = E1000_DEFAULT_RXD, .arg = { .r = { .min = E1000_MIN_RXD, - .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD + .max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : + E1000_MAX_82544_RXD }} }; @@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter) if (num_TxAbsIntDelay > bd) { adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; e1000_validate_option(&adapter->tx_abs_int_delay, &opt, - adapter); + adapter); } else { adapter->tx_abs_int_delay = opt.def; } @@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter) if (num_RxIntDelay > bd) { adapter->rx_int_delay = RxIntDelay[bd]; e1000_validate_option(&adapter->rx_int_delay, &opt, - adapter); + adapter); } else { adapter->rx_int_delay = opt.def; } @@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter) if (num_RxAbsIntDelay > bd) { adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; e1000_validate_option(&adapter->rx_abs_int_delay, &opt, - adapter); + adapter); } else { adapter->rx_abs_int_delay = opt.def; } @@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter) break; case 4: e_dev_info("%s set to simplified " - "(2000-8000) ints mode\n", opt.name); + "(2000-8000) ints mode\n", opt.name); adapter->itr_setting = adapter->itr; break; default: e1000_validate_option(&adapter->itr, &opt, - adapter); + adapter); /* save the setting, because the dynamic bits * change itr. * clear the lower two bits because they are - * used as control */ + * used as control + */ adapter->itr_setting = adapter->itr & ~3; break; } @@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter) * * Handles speed and duplex options on fiber adapters **/ - static void e1000_check_fiber_options(struct e1000_adapter *adapter) { int bd = adapter->bd_number; @@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter) * * Handles speed and duplex options on copper adapters **/ - static void e1000_check_copper_options(struct e1000_adapter *adapter) { struct e1000_option opt; @@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter) e_dev_info("Using Autonegotiation at Half Duplex only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | - ADVERTISE_100_HALF; + ADVERTISE_100_HALF; break; case FULL_DUPLEX: e_dev_info("Full Duplex specified without Speed\n"); e_dev_info("Using Autonegotiation at Full Duplex only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | - ADVERTISE_100_FULL | - ADVERTISE_1000_FULL; + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; break; case SPEED_10: e_dev_info("10 Mbps Speed specified without Duplex\n"); e_dev_info("Using Autonegotiation at 10 Mbps only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | - ADVERTISE_10_FULL; + ADVERTISE_10_FULL; break; case SPEED_10 + HALF_DUPLEX: e_dev_info("Forcing to 10 Mbps Half Duplex\n"); @@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter) e_dev_info("Using Autonegotiation at 100 Mbps only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | - ADVERTISE_100_FULL; + ADVERTISE_100_FULL; break; case SPEED_100 + HALF_DUPLEX: e_dev_info("Forcing to 100 Mbps Half Duplex\n"); diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index e73c2c355993..e0991388664c 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,7 +1,7 @@ 
/******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -32,69 +32,6 @@ #include "e1000.h" -#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 -#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 -#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 -#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F - -#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 -#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 -#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 - -#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 -#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 -#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 - -#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C -#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 - -#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ -#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 - -#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 -#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 - -/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ -#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */ -#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 -#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ -#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ -#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ - -/* PHY Specific Control Register 2 (Page 0, Register 26) */ -#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 - /* 1=Reverse Auto-Negotiation */ - -/* MAC Specific Control Register (Page 2, Register 21) */ -/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ -#define GG82563_MSCR_TX_CLK_MASK 0x0007 -#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 -#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 -#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 - -#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ - -/* DSP Distance Register (Page 5, Register 26) */ -#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M - 1 = 50-80M - 2 = 80-110M - 3 = 110-140M - 4 = >140M - */ - -/* Kumeran Mode Control Register (Page 193, Register 16) */ -#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 - -/* Max number of times Kumeran read/write should be validated */ -#define GG82563_MAX_KMRN_RETRY 0x5 - -/* Power Management Control Register (Page 193, Register 20) */ -#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 - /* 1=Enable SERDES Electrical Idle */ - -/* In-Band Control Register (Page 194, Register 18) */ -#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ - /* A table for the GG82563 cable length where the range is defined * with a lower bound at "index" and the upper bound at * "index + 5". 
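The cable-length comment above describes a table whose entry at "index" holds the lower bound and whose entry at "index + 5" holds the upper bound of the estimated cable length; e1000_get_cable_length_80003es2lan (further down in this patch) reads the GG82563 DSP distance code and indexes into it. A minimal sketch of that lookup pattern follows; the helper name and the table contents are illustrative of the scheme (lower bounds followed by the matching upper bounds), not asserted to be the exact table this driver ships.

	/* Sketch: bounded cable-length lookup keyed by the 3-bit DSP
	 * distance code. Table values are illustrative only.
	 */
	static const u16 gg82563_cable_length_table[] = {
		0, 60, 115, 150, 150,	/* lower bounds, indexed by code */
		60, 115, 150, 180, 180,	/* upper bounds, at index + 5 */
		0xFF
	};
	#define GG82563_CABLE_LENGTH_TABLE_SIZE \
		ARRAY_SIZE(gg82563_cable_length_table)

	static s32 gg82563_cable_length_sketch(struct e1000_hw *hw)
	{
		struct e1000_phy_info *phy = &hw->phy;
		u16 phy_data, index;
		s32 ret_val;

		ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
		if (ret_val)
			return ret_val;

		index = phy_data & GG82563_DSPD_CABLE_LENGTH;
		if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5)
			return -E1000_ERR_PHY;

		phy->min_cable_length = gg82563_cable_length_table[index];
		phy->max_cable_length = gg82563_cable_length_table[index + 5];
		phy->cable_length =
		    (phy->min_cable_length + phy->max_cable_length) / 2;

		return 0;
	}

Keeping the lower and upper bounds five entries apart in one flat array lets a single bounds check cover both lookups.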
@@ -111,11 +48,10 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); -static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); -static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, - u16 *data); -static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, - u16 data); +static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); /** @@ -625,16 +561,16 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) e_dbg("GG82563 PSCR: %X\n", phy_data); - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); /* Reset the phy to commit changes. */ - phy_data |= MII_CR_RESET; + phy_data |= BMCR_RESET; - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; @@ -696,7 +632,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; + s32 ret_val; u16 phy_data, index; ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); @@ -774,6 +710,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ctrl = er32(CTRL); ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + e_dbg("Issuing a global reset to MAC\n"); ew32(CTRL, ctrl | E1000_CTRL_RST); e1000_release_phy_80003es2lan(hw); @@ -833,6 +772,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); + if (ret_val) + return ret_val; /* Disable IBIST slave mode (far-end loopback) */ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, @@ -1006,7 +947,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) return ret_val; /* SW Reset the PHY so all changes take effect */ - ret_val = e1000e_commit_phy(hw); + ret_val = hw->phy.ops.commit(hw); if (ret_val) { e_dbg("Error Resetting the PHY\n"); return ret_val; @@ -1272,7 +1213,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data) { u32 kmrnctrlsta; - s32 ret_val = 0; + s32 ret_val; ret_val = e1000_acquire_mac_csr_80003es2lan(hw); if (ret_val) @@ -1307,7 +1248,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 data) { u32 kmrnctrlsta; - s32 ret_val = 0; + s32 ret_val; ret_val = e1000_acquire_mac_csr_80003es2lan(hw); if (ret_val) @@ -1331,7 +1272,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, **/ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) { - s32 ret_val = 0; + s32 ret_val; /* If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm @@ -1434,18 +1375,18 @@ static const struct e1000_phy_operations es2_phy_ops = { .acquire = e1000_acquire_phy_80003es2lan, .check_polarity = e1000_check_polarity_m88, .check_reset_block = e1000e_check_reset_block_generic, - .commit = 
e1000e_phy_sw_reset, - .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, - .get_cfg_done = e1000_get_cfg_done_80003es2lan, - .get_cable_length = e1000_get_cable_length_80003es2lan, - .get_info = e1000e_get_phy_info_m88, - .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, .release = e1000_release_phy_80003es2lan, - .reset = e1000e_phy_hw_reset_generic, - .set_d0_lplu_state = NULL, - .set_d3_lplu_state = e1000e_set_d3_lplu_state, - .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, - .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, }; static const struct e1000_nvm_operations es2_nvm_ops = { diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h new file mode 100644 index 000000000000..90d363b2d280 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -0,0 +1,95 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_80003ES2LAN_H_ +#define _E1000E_80003ES2LAN_H_ + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 +#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 +#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 + +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) + * 0 = <50M + * 1 = 50-80M + * 2 = 80-100M + * 3 = 110-140M + * 4 = >140M + */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Max number of times Kumeran read/write should be validated */ +#define GG82563_MAX_KMRN_RETRY 0x5 + +/* Power Management Control Register (Page 193, Register 20) */ +/* 1=Enable SERDES Electrical Idle */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index c77d010d5c59..2faffbde179e 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -44,21 +44,6 @@ #include "e1000.h" -#define ID_LED_RESERVED_F746 0xF746 -#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_OFF1_ON2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) - -#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 -#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ -#define E1000_BASE1000T_STATUS 10 -#define E1000_IDLE_ERROR_COUNT_MASK 0xFF -#define E1000_RECEIVE_ERROR_COUNTER 21 -#define E1000_RECEIVE_ERROR_MAX 0xFFFF - -#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ - static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); @@ -67,9 +52,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); -static s32 e1000_setup_link_82571(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); -static void e1000_clear_vfta_82571(struct e1000_hw *hw); static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); static s32 e1000_led_on_82574(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); @@ -449,13 +432,13 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) break; case e1000_82574: case e1000_82583: - ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); if (ret_val) return ret_val; phy->id = (u32)(phy_id << 16); udelay(20); - ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; @@ -556,16 +539,14 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) s32 i = 0; extcnf_ctrl = er32(EXTCNF_CTRL); - extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; do { + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; ew32(EXTCNF_CTRL, extcnf_ctrl); extcnf_ctrl = er32(EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) break; - extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; - usleep_range(2000, 4000); i++; } while (i < MDIO_OWNERSHIP_TIMEOUT); @@ -937,6 +918,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) @@ -1329,9 +1312,10 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw) */ vfta_offset = (hw->mng_cookie.vlan_id >> E1000_VFTA_ENTRY_SHIFT) & - E1000_VFTA_ENTRY_MASK; - vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & - E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = + 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); } break; default: @@ -1399,7 +1383,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw) { u16 status_1kbt = 0; u16 receive_errors = 0; - s32 ret_val = 0; + s32 ret_val; /* Read PHY Receive Error counter first, if its is max - all F's then * read the Base1000T status register If both are max then PHY is hung. 
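The comment above lays out the 82574 hang heuristic: read the PHY receive error counter first, and only if it reads back saturated (all F's) consult the Base1000T status register; the PHY is treated as hung when both are at their maximum. A hedged sketch of that two-step check, using the register offsets and masks this patch moves into 82571.h (the helper name is illustrative):

	/* Sketch of the hang check described above: the PHY is only
	 * declared hung when both the receive error counter and the
	 * 1000BASE-T idle error count read back fully saturated.
	 */
	static bool check_phy_82574_sketch(struct e1000_hw *hw)
	{
		u16 receive_errors = 0;
		u16 status_1kbt = 0;

		if (e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors))
			return false;
		if (receive_errors != E1000_RECEIVE_ERROR_MAX)
			return false;	/* not saturated - PHY looks alive */

		if (e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt))
			return false;

		return (status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
		       E1000_IDLE_ERROR_COUNT_MASK;
	}

Checking the cheap receive error counter first means the extra Base1000T read is only issued when the first counter already looks suspicious.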
@@ -1544,13 +1528,12 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) ctrl = er32(CTRL); status = er32(STATUS); - rxcw = er32(RXCW); + er32(RXCW); /* SYNCH bit and IV bit are sticky */ udelay(10); rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { - /* Receiver is synchronized with no invalid bits. */ switch (mac->serdes_link_state) { case e1000_serdes_link_autoneg_complete: @@ -1799,6 +1782,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) if (ret_val) return ret_val; ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; } } @@ -1812,7 +1797,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) { if (hw->mac.type == e1000_82571) { - s32 ret_val = 0; + s32 ret_val; /* If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm @@ -1931,7 +1916,7 @@ static const struct e1000_phy_operations e82_phy_ops_igp = { .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, .set_d3_lplu_state = e1000e_set_d3_lplu_state, .write_reg = e1000e_write_phy_reg_igp, - .cfg_on_link_up = NULL, + .cfg_on_link_up = NULL, }; static const struct e1000_phy_operations e82_phy_ops_m88 = { @@ -1940,7 +1925,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = { .check_reset_block = e1000e_check_reset_block_generic, .commit = e1000e_phy_sw_reset, .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, - .get_cfg_done = e1000e_get_cfg_done, + .get_cfg_done = e1000e_get_cfg_done_generic, .get_cable_length = e1000e_get_cable_length_m88, .get_info = e1000e_get_phy_info_m88, .read_reg = e1000e_read_phy_reg_m88, @@ -1949,7 +1934,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = { .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, .set_d3_lplu_state = e1000e_set_d3_lplu_state, .write_reg = e1000e_write_phy_reg_m88, - .cfg_on_link_up = NULL, + .cfg_on_link_up = NULL, }; static const struct e1000_phy_operations e82_phy_ops_bm = { @@ -1958,7 +1943,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = { .check_reset_block = e1000e_check_reset_block_generic, .commit = e1000e_phy_sw_reset, .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, - .get_cfg_done = e1000e_get_cfg_done, + .get_cfg_done = e1000e_get_cfg_done_generic, .get_cable_length = e1000e_get_cable_length_m88, .get_info = e1000e_get_phy_info_m88, .read_reg = e1000e_read_phy_reg_bm2, @@ -1967,7 +1952,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = { .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, .set_d3_lplu_state = e1000e_set_d3_lplu_state, .write_reg = e1000e_write_phy_reg_bm2, - .cfg_on_link_up = NULL, + .cfg_on_link_up = NULL, }; static const struct e1000_nvm_operations e82571_nvm_ops = { @@ -2044,6 +2029,7 @@ const struct e1000_info e1000_82574_info = { | FLAG_HAS_MSIX | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP | FLAG_APME_IN_CTRL3 | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT @@ -2065,6 +2051,7 @@ const struct e1000_info e1000_82583_info = { .mac = e1000_82583, .flags = FLAG_HAS_HW_VLAN_FILTER | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP | FLAG_APME_IN_CTRL3 | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h new file mode 100644 index 000000000000..85cb1a3b7cd4 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -0,0 +1,58 @@ 
+/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_82571_H_ +#define _E1000E_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ + +/* Intr Throttling - RW */ +#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) + +#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAC_MASK_82574 0x01F00000 + +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 + +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF +bool e1000_check_phy_82574(struct e1000_hw *hw); +bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index 591b71324505..c2dcfcc10857 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2012 Intel Corporation. +# Copyright(c) 1999 - 2013 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -34,5 +34,5 @@ obj-$(CONFIG_E1000E) += e1000e.o e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ mac.o manage.o nvm.o phy.o \ - param.o ethtool.o netdev.o + param.o ethtool.o netdev.o ptp.o diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 4dab6fc265a2..fc3a4fe1ac71 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -29,25 +29,6 @@ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ -#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ -#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ -#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ -#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ -#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ -#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ -#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ - /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ #define REQ_TX_DESCRIPTOR_MULTIPLE 8 #define REQ_RX_DESCRIPTOR_MULTIPLE 8 @@ -86,7 +67,6 @@ #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ -#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ #define E1000_CTRL_EXT_LSECCK 0x00001000 #define E1000_CTRL_EXT_PHYPDEN 0x00100000 @@ -107,6 +87,7 @@ #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ #define E1000_RXDEXT_STATERR_CE 0x01000000 #define E1000_RXDEXT_STATERR_SE 0x02000000 #define E1000_RXDEXT_STATERR_SEQ 0x04000000 @@ -115,19 +96,19 @@ /* mask to determine if packets should be dropped due to frame errors */ #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ - E1000_RXD_ERR_CE | \ - E1000_RXD_ERR_SE | \ - E1000_RXD_ERR_SEQ | \ - E1000_RXD_ERR_CXE | \ - E1000_RXD_ERR_RXE) + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) #define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 @@ -242,9 +223,9 @@ #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -/* Bit definitions for the Management Data IO (MDIO) and Management Data - * Clock (MDC) pins in the Device Control Register. 
- */ +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 + +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 /* Device Status */ #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ @@ -260,8 +241,6 @@ #define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ -/* Constants used to interpret the masked PCI-X bus speed. */ - #define HALF_DUPLEX 1 #define FULL_DUPLEX 2 @@ -274,14 +253,15 @@ #define ADVERTISE_1000_FULL 0x0020 /* 1000/H is not supported, nor spec-compliant. */ -#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) -#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) #define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX @@ -319,6 +299,7 @@ #define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ #define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ /* Transmit Control */ #define E1000_TCTL_EN 0x00000002 /* enable Tx */ @@ -328,8 +309,6 @@ #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ -/* Transmit Arbitration Count */ - /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 @@ -384,10 +363,15 @@ #define E1000_KABGTXD_BGSQLBIAS 0x00050000 +/* Low Power IDLE Control */ +#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ + /* PBA constants */ #define E1000_PBA_8K 0x0008 /* 8KB */ #define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_RXA_MASK 0xFFFF + #define E1000_PBS_16K E1000_PBA_16K /* Uncorrectable/correctable ECC Error counts and enable bits */ @@ -439,11 +423,11 @@ * o LSC = Link Status Change */ #define IMS_ENABLE_MASK ( \ - E1000_IMS_RXT0 | \ - E1000_IMS_TXDW | \ - E1000_IMS_RXDMT0 | \ - E1000_IMS_RXSEQ | \ - E1000_IMS_LSC) + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) /* Interrupt Mask Set */ #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ @@ -542,6 +526,28 @@ #define E1000_RXCW_C 0x20000000 /* Receive config */ #define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define 
E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + /* PCI Express Control */ #define E1000_GCR_RXD_NO_SNOOP 0x00000001 #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 @@ -557,66 +563,6 @@ E1000_GCR_TXDSCW_NO_SNOOP | \ E1000_GCR_TXDSCR_NO_SNOOP) -/* PHY Control Register */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 - -/* PHY Status Register */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ - -/* Autoneg Advertisement Register */ -#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ -#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ -#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ -#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ - -/* Link Partner Ability Register (Base Page) */ -#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ -#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ -#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ - -/* Autoneg Expansion Register */ -#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ - -/* 1000BASE-T Control Register */ -#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ -#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ - /* 0=DTE device */ -#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ - /* 0=Configure PHY as Slave */ -#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ - /* 0=Automatic Master/Slave config */ - -/* 1000BASE-T Status Register */ -#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ - - -/* PHY 1000 MII Register/Bit Definitions */ -/* PHY Registers defined by IEEE */ -#define PHY_CONTROL 0x00 /* Control Register */ -#define PHY_STATUS 0x01 /* Status Register */ -#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ -#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ -#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ -#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ -#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ -#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ - -#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ - /* NVM Control */ #define E1000_EECD_SK 0x00000001 /* NVM Clock */ #define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ @@ -648,6 
+594,10 @@ /* NVM Word Offsets */ #define NVM_COMPAT 0x0003 #define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + #define NVM_INIT_CONTROL2_REG 0x000F #define NVM_INIT_CONTROL3_PORT_B 0x0014 #define NVM_INIT_3GIO_3 0x001A @@ -656,8 +606,6 @@ #define NVM_ALT_MAC_ADDR_PTR 0x0037 #define NVM_CHECKSUM_REG 0x003F -#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ - #define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ #define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ @@ -766,9 +714,6 @@ #define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* Auto crossover enabled all speeds */ #define M88E1000_PSCR_AUTO_X_MODE 0x0060 -/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) - * 0=Normal 10BASE-T Rx Threshold - */ #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ /* M88E1000 PHY Specific Status Register */ @@ -804,11 +749,6 @@ /* BME1000 PHY Specific Control Register */ #define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ - -#define PHY_PAGE_SHIFT 5 -#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ - ((reg) & MAX_PHY_REG_ADDRESS)) - /* Bits... * 15-5: page * 4-0: register offset @@ -855,8 +795,4 @@ /* SerDes Control */ #define E1000_GEN_POLL_TIMEOUT 640 -/* FW Semaphore */ -#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 -#define E1000_FWSM_WLOCK_MAC_SHIFT 7 - #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 7e95f221d60b..fcc758138b8a 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -41,7 +41,11 @@ #include <linux/pci-aspm.h> #include <linux/crc32.h> #include <linux/if_vlan.h> - +#include <linux/clocksource.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/ptp_classify.h> +#include <linux/mii.h> #include "hw.h" struct e1000_info; @@ -75,9 +79,6 @@ struct e1000_info; #define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ #define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ -/* Early Receive defines */ -#define E1000_ERT_2048 0x100 - #define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ /* How many Tx Descriptors do we need to call netif_wake_queue ? 
*/ @@ -94,70 +95,6 @@ struct e1000_info; #define DEFAULT_JUMBO 9234 -/* BM/HV Specific Registers */ -#define BM_PORT_CTRL_PAGE 769 - -#define PHY_UPPER_SHIFT 21 -#define BM_PHY_REG(page, reg) \ - (((reg) & MAX_PHY_REG_ADDRESS) |\ - (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ - (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) - -/* PHY Wakeup Registers and defines */ -#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) -#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) -#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) -#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) -#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) -#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) -#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) -#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) -#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) -#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) - -#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ -#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ -#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ -#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ -#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ -#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ -#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ - -#define HV_STATS_PAGE 778 -#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ -#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) -#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ -#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) -#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */ -#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) -#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ -#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) -#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ -#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) -#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ -#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) -#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ -#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) - -#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ - -/* BM PHY Copper Specific Status */ -#define BM_CS_STATUS 17 -#define BM_CS_STATUS_LINK_UP 0x0400 -#define BM_CS_STATUS_RESOLVED 0x0800 -#define BM_CS_STATUS_SPEED_MASK 0xC000 -#define BM_CS_STATUS_SPEED_1000 0x8000 - -/* 82577 Mobile Phy Status Register */ -#define HV_M_STATUS 26 -#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 -#define HV_M_STATUS_SPEED_MASK 0x0300 -#define HV_M_STATUS_SPEED_1000 0x0200 -#define HV_M_STATUS_LINK_UP 0x0040 - -#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ -#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 - /* Time to wait before putting the device into D3 if there's no link (in ms). 
*/ #define LINK_TIMEOUT 100 @@ -355,6 +292,7 @@ struct e1000_adapter { u64 gorc_old; u32 alloc_rx_buff_failed; u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; unsigned int rx_ps_pages; u16 rx_ps_bsize0; @@ -368,7 +306,7 @@ struct e1000_adapter { /* structs defined in e1000_hw.h */ struct e1000_hw hw; - spinlock_t stats64_lock; + spinlock_t stats64_lock; /* protects statistics counters */ struct e1000_hw_stats stats; struct e1000_phy_info phy_info; struct e1000_phy_stats phy_stats; @@ -404,6 +342,16 @@ struct e1000_adapter { u16 tx_ring_count; u16 rx_ring_count; + + struct hwtstamp_config hwtstamp_config; + struct delayed_work systim_overflow_work; + struct sk_buff *tx_hwtstamp_skb; + struct work_struct tx_hwtstamp_work; + spinlock_t systim_lock; /* protects SYSTIML/H regsters */ + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; }; struct e1000_info { @@ -418,6 +366,40 @@ struct e1000_info { const struct e1000_nvm_operations *nvm_ops; }; +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); + +/* The system time is maintained by a 64-bit counter comprised of the 32-bit + * SYSTIMH and SYSTIML registers. How the counter increments (and therefore + * its resolution) is based on the contents of the TIMINCA register - it + * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0). + * For the best accuracy, the incperiod should be as small as possible. The + * incvalue is scaled by a factor as large as possible (while still fitting + * in bits 23:0) so that relatively small clock corrections can be made. + * + * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of + * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) + * bits to count nanoseconds leaving the rest for fractional nonseconds. + */ +#define INCVALUE_96MHz 125 +#define INCVALUE_SHIFT_96MHz 17 +#define INCPERIOD_SHIFT_96MHz 2 +#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz) + +#define INCVALUE_25MHz 40 +#define INCVALUE_SHIFT_25MHz 18 +#define INCPERIOD_25MHz 1 + +/* Another drawback of scaling the incvalue by a large factor is the + * 64-bit SYSTIM register overflows more quickly. This is dealt with + * by simply reading the clock before it overflows. 
+ * + * Clock ns bits Overflows after + * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~ + * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs + * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours + */ +#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4) + /* hardware capability, feature, and workaround flags */ #define FLAG_HAS_AMT (1 << 0) #define FLAG_HAS_FLASH (1 << 1) @@ -433,7 +415,7 @@ struct e1000_info { #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) #define FLAG_IS_QUAD_PORT_A (1 << 12) #define FLAG_IS_QUAD_PORT (1 << 13) -/* reserved bit14 */ +#define FLAG_HAS_HW_TIMESTAMP (1 << 14) #define FLAG_APME_IN_WUC (1 << 15) #define FLAG_APME_IN_CTRL3 (1 << 16) #define FLAG_APME_CHECK_PORT_B (1 << 17) @@ -449,7 +431,7 @@ struct e1000_info { #define FLAG_MSI_ENABLED (1 << 27) /* reserved (1 << 28) */ #define FLAG_TSO_FORCE (1 << 29) -#define FLAG_RX_RESTART_NOW (1 << 30) +#define FLAG_RESTART_NOW (1 << 30) #define FLAG_MSI_TEST_FAILED (1 << 31) #define FLAG2_CRC_STRIPPING (1 << 0) @@ -465,6 +447,7 @@ struct e1000_info { #define FLAG2_NO_DISABLE_RX (1 << 10) #define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) #define FLAG2_DFLT_CRC_STRIPPING (1 << 12) +#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) @@ -514,8 +497,6 @@ extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); extern unsigned int copybreak; -extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); - extern const struct e1000_info e1000_82571_info; extern const struct e1000_info e1000_82572_info; extern const struct e1000_info e1000_82573_info; @@ -529,138 +510,8 @@ extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info e1000_es2_info; -extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, - u32 pba_num_size); - -extern s32 e1000e_commit_phy(struct e1000_hw *hw); - -extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); - -extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); -extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); - -extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); -extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, - bool state); -extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); -extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); -extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); -extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); -extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); -extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); -extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); - -extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); -extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); -extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); -extern s32 e1000e_setup_led_generic(struct e1000_hw *hw); -extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); -extern s32 e1000e_led_on_generic(struct e1000_hw *hw); -extern s32 e1000e_led_off_generic(struct e1000_hw *hw); -extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); -extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); -extern void e1000_set_lan_id_single_port(struct e1000_hw *hw); -extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); -extern s32 
e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); -extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); -extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); -extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw); -extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); -extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); -extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); -extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); -extern s32 e1000e_setup_link_generic(struct e1000_hw *hw); -extern void e1000_clear_vfta_generic(struct e1000_hw *hw); -extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); -extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, - u8 *mc_addr_list, - u32 mc_addr_count); -extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); -extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); -extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); -extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); -extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); -extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw); -extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); -extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); -extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); -extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); -extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); -extern void e1000e_reset_adaptive(struct e1000_hw *hw); -extern void e1000e_update_adaptive(struct e1000_hw *hw); - -extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); -extern s32 e1000e_get_phy_id(struct e1000_hw *hw); -extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); -extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); -extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); -extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); -extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); -extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); -extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); -extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); -extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); -extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); -extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); -extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); -extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); -extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); -extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); -extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); -extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, - u16 *phy_reg); -extern 
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, - u16 *phy_reg); -extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); -extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); -extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success); -extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); -extern void e1000_power_up_phy_copper(struct e1000_hw *hw); -extern void e1000_power_down_phy_copper(struct e1000_hw *hw); -extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_check_downshift(struct e1000_hw *hw); -extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); -extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); -extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); -extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw); -extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); -extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); - -extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); -extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); -extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); -extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); -extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); -extern bool e1000_check_phy_82574(struct e1000_hw *hw); +extern void e1000e_ptp_init(struct e1000_adapter *adapter); +extern void e1000e_ptp_remove(struct e1000_adapter *adapter); static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) { @@ -687,20 +538,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) return hw->phy.ops.write_reg_locked(hw, offset, data); } -static inline s32 e1000_get_cable_length(struct e1000_hw *hw) -{ - return hw->phy.ops.get_cable_length(hw); -} - -extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); -extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); -extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); -extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); -extern void e1000e_release_nvm(struct e1000_hw *hw); extern void e1000e_reload_nvm_generic(struct e1000_hw *hw); -extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) { 
@@ -735,10 +573,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw) return hw->phy.ops.get_info(hw); } -extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); -extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); -extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); - static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) { return readl(hw->hw_addr + reg); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index fd4772a2691c..2c1813737f6d 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/vmalloc.h> +#include <linux/mdio.h> #include "e1000.h" @@ -98,7 +99,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), E1000_STAT("tx_flow_control_xon", stats.xontxc), E1000_STAT("tx_flow_control_xoff", stats.xofftxc), - E1000_STAT("rx_long_byte_count", stats.gorc), E1000_STAT("rx_csum_offload_good", hw_csum_good), E1000_STAT("rx_csum_offload_errors", hw_csum_err), E1000_STAT("rx_header_split", rx_hdr_split), @@ -108,6 +108,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("dropped_smbus", stats.mgpdc), E1000_STAT("rx_dma_failed", rx_dma_failed), E1000_STAT("tx_dma_failed", tx_dma_failed), + E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), E1000_STAT("uncorr_ecc_errors", uncorr_errors), E1000_STAT("corr_ecc_errors", corr_errors), }; @@ -129,7 +130,6 @@ static int e1000_get_settings(struct net_device *netdev, u32 speed; if (hw->phy.media_type == e1000_media_type_copper) { - ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | @@ -327,12 +327,12 @@ static int e1000_set_settings(struct net_device *netdev, } /* reset the link */ - if (netif_running(adapter->netdev)) { e1000e_down(adapter); e1000e_up(adapter); - } else + } else { e1000e_reset(adapter); + } clear_bit(__E1000_RESETTING, &adapter->state); return 0; @@ -417,7 +417,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data) adapter->msg_enable = data; } -static int e1000_get_regs_len(struct net_device *netdev) +static int e1000_get_regs_len(struct net_device __always_unused *netdev) { #define E1000_REGS_LEN 32 /* overestimate */ return E1000_REGS_LEN * sizeof(u32); @@ -471,10 +471,10 @@ static void e1000_get_regs(struct net_device *netdev, regs_buff[22] = adapter->phy_stats.receive_errors; regs_buff[23] = regs_buff[13]; /* mdix mode */ } - regs_buff[21] = 0; /* was idle_errors */ - e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); - regs_buff[24] = (u32)phy_data; /* phy local receiver status */ - regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + regs_buff[21] = 0; /* was idle_errors */ + e1e_rphy(hw, MII_STAT1000, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ } static int e1000_get_eeprom_len(struct net_device *netdev) @@ -761,8 +761,9 @@ static bool reg_pattern_test(struct e1000_adapter 
*adapter, u64 *data, (test[pat] & write)); val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); if (val != (test[pat] & write & mask)) { - e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", - reg + offset, val, (test[pat] & write & mask)); + e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", + reg + (offset << 2), val, + (test[pat] & write & mask)); *data = reg; return 1; } @@ -777,7 +778,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, __ew32(&adapter->hw, reg, write & mask); val = __er32(&adapter->hw, reg); if ((write & mask) != (val & mask)) { - e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return 1; @@ -885,12 +886,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < mac->rar_entry_count; i++) { - /* Cannot test write-protected SHRAL[n] registers */ - if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) - continue; + if (mac->type == e1000_pch_lpt) { + /* Cannot test write-protected SHRAL[n] registers */ + if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) + continue; - REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), - mask, 0xFFFFFFFF); + /* SHRAH[9] different than the others */ + if (i == 10) + mask |= (1 << 30); + else + mask &= ~(1 << 30); + } + + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, + 0xFFFFFFFF); } for (i = 0; i < mac->mta_reg_count; i++) @@ -924,7 +933,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) return *data; } -static irqreturn_t e1000_test_intr(int irq, void *data) +static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) { struct net_device *netdev = (struct net_device *) data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1274,7 +1283,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) if (hw->phy.type == e1000_phy_ife) { /* force 100, set loopback */ - e1e_wphy(hw, PHY_CONTROL, 0x6100); + e1e_wphy(hw, MII_BMCR, 0x6100); /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ ctrl_reg = er32(CTRL); @@ -1297,9 +1306,9 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) /* Auto-MDI/MDIX Off */ e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); /* reset to update Auto-MDI/MDIX */ - e1e_wphy(hw, PHY_CONTROL, 0x9140); + e1e_wphy(hw, MII_BMCR, 0x9140); /* autoneg off */ - e1e_wphy(hw, PHY_CONTROL, 0x8140); + e1e_wphy(hw, MII_BMCR, 0x8140); break; case e1000_phy_gg82563: e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); @@ -1311,7 +1320,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) phy_reg |= 0x006; e1e_wphy(hw, PHY_REG(2, 21), phy_reg); /* Assert SW reset for above settings to take effect */ - e1000e_commit_phy(hw); + hw->phy.ops.commit(hw); mdelay(1); /* Force Full Duplex */ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); @@ -1345,7 +1354,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); /* Enable loopback on the PHY */ -#define I82577_PHY_LBK_CTRL 19 e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); break; default: @@ -1353,7 +1361,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) } /* force 1000, set loopback */ - e1e_wphy(hw, PHY_CONTROL, 0x4140); + e1e_wphy(hw, MII_BMCR, 0x4140); mdelay(250); /* Now set up the MAC to the same speed/duplex as the PHY. */ @@ -1395,7 +1403,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl = er32(CTRL); - int link = 0; + int link; /* special requirements for 82571/82572 fiber adapters */ @@ -1528,11 +1536,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) hw->mac.autoneg = 1; if (hw->phy.type == e1000_phy_gg82563) e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); - e1e_rphy(hw, PHY_CONTROL, &phy_reg); - if (phy_reg & MII_CR_LOOPBACK) { - phy_reg &= ~MII_CR_LOOPBACK; - e1e_wphy(hw, PHY_CONTROL, phy_reg); - e1000e_commit_phy(hw); + e1e_rphy(hw, MII_BMCR, &phy_reg); + if (phy_reg & BMCR_LOOPBACK) { + phy_reg &= ~BMCR_LOOPBACK; + e1e_wphy(hw, MII_BMCR, phy_reg); + if (hw->phy.ops.commit) + hw->phy.ops.commit(hw); } break; } @@ -1694,7 +1703,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) return *data; } -static int e1000e_get_sset_count(struct net_device *netdev, int sset) +static int e1000e_get_sset_count(struct net_device __always_unused *netdev, + int sset) { switch (sset) { case ETH_SS_TEST: @@ -1957,7 +1967,7 @@ static int e1000_nway_reset(struct net_device *netdev) } static void e1000_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, + struct ethtool_stats __always_unused *stats, u64 *data) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1986,8 +1996,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, } } -static void e1000_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) +static void e1000_get_strings(struct net_device __always_unused *netdev, + u32 stringset, u8 *data) { u8 *p = data; int i; @@ -2007,7 +2017,8 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, } static int e1000_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *info, u32 *rule_locs) + struct ethtool_rxnfc *info, + u32 __always_unused *rule_locs) { info->data = 0; @@ -2053,6 +2064,171 @@ static int e1000_get_rxnfc(struct net_device *netdev, } } +static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = 
netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl; + u32 status, ret_val; + + if (!(adapter->flags & FLAG_IS_ICH) || + !(adapter->flags2 & FLAG2_HAS_EEE)) + return -EOPNOTSUPP; + + switch (hw->phy.type) { + case e1000_phy_82579: + cap_addr = I82579_EEE_CAPABILITY; + adv_addr = I82579_EEE_ADVERTISEMENT; + lpa_addr = I82579_EEE_LP_ABILITY; + pcs_stat_addr = I82579_EEE_PCS_STATUS; + break; + case e1000_phy_i217: + cap_addr = I217_EEE_CAPABILITY; + adv_addr = I217_EEE_ADVERTISEMENT; + lpa_addr = I217_EEE_LP_ABILITY; + pcs_stat_addr = I217_EEE_PCS_STATUS; + break; + default: + return -EOPNOTSUPP; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return -EBUSY; + + /* EEE Capability */ + ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); + if (ret_val) + goto release; + edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); + + /* EEE Advertised */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data); + if (ret_val) + goto release; + edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + /* EEE Link Partner Advertised */ + ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); + if (ret_val) + goto release; + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + /* EEE PCS Status */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); + if (hw->phy.type == e1000_phy_82579) + phy_data <<= 8; + +release: + hw->phy.ops.release(hw); + if (ret_val) + return -ENODATA; + + e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl); + status = er32(STATUS); + + /* Result of the EEE auto negotiation - there is no register that + * has the status of the EEE negotiation so do a best-guess based + * on whether both Tx and Rx LPI indications have been received or + * base it on the link speed, the EEE advertised speeds on both ends + * and the speeds on which EEE is enabled locally. 
+ */ + if (((phy_data & E1000_EEE_TX_LPI_RCVD) && + (phy_data & E1000_EEE_RX_LPI_RCVD)) || + ((status & E1000_STATUS_SPEED_100) && + (edata->advertised & ADVERTISED_100baseT_Full) && + (edata->lp_advertised & ADVERTISED_100baseT_Full) && + (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) || + ((status & E1000_STATUS_SPEED_1000) && + (edata->advertised & ADVERTISED_1000baseT_Full) && + (edata->lp_advertised & ADVERTISED_1000baseT_Full) && + (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE))) + edata->eee_active = true; + + edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; + edata->tx_lpi_enabled = true; + edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; + + return 0; +} + +static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + if (!(adapter->flags & FLAG_IS_ICH) || + !(adapter->flags2 & FLAG2_HAS_EEE)) + return -EOPNOTSUPP; + + ret_val = e1000e_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.advertised != edata->advertised) { + e_err("Setting EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) { + e_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) { + hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; + + /* reset the link */ + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + } + + return 0; +} + +static int e1000e_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + ethtool_op_get_ts_info(netdev, info); + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return 0; + + info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + + return 0; +} + static const struct ethtool_ops e1000_ethtool_ops = { .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, @@ -2080,7 +2256,9 @@ static const struct ethtool_ops e1000_ethtool_ops = { .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, .get_rxnfc = e1000_get_rxnfc, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = e1000e_get_ts_info, + .get_eee = e1000e_get_eee, + .set_eee = e1000e_set_eee, }; void e1000e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index b88676ff3d86..1e6b889aee87 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,7 +1,7 @@ 
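The three ethtool callbacks wired up above (e1000e_get_eee, e1000e_set_eee, e1000e_get_ts_info) are what back the standard ETHTOOL_GEEE/SEEE and timestamp-capability queries for the 82579/I217 parts. A minimal user-space sketch of the read path follows; it is an illustration only, not part of this patch, and it assumes a Linux system whose <linux/ethtool.h> provides ETHTOOL_GEEE (v3.5 or later) and an e1000e interface named "eth0" (adjust the name for your setup). The ethtool utility's --show-eee option drives the same ioctl where available.

/* sketch: query the new .get_eee hook via SIOCETHTOOL (not driver code) */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
        ifr.ifr_data = (char *)&eee;

        /* Lands in e1000e_get_eee() through the .get_eee hook added above. */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GEEE");  /* e.g. EOPNOTSUPP on non-EEE parts */
                close(fd);
                return 1;
        }

        printf("eee_enabled=%u eee_active=%u advertised=0x%x lp_advertised=0x%x\n",
               eee.eee_enabled, eee.eee_active, eee.advertised,
               eee.lp_advertised);
        close(fd);
        return 0;
}

The eee_active value reported here is the best-guess result computed in the hunk above from the Tx/Rx LPI indications, the negotiated link speed and the EEE abilities advertised on both ends.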
/******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -29,332 +29,10 @@ #ifndef _E1000_HW_H_ #define _E1000_HW_H_ -#include <linux/types.h> - -struct e1000_hw; -struct e1000_adapter; - +#include "regs.h" #include "defines.h" -enum e1e_registers { - E1000_CTRL = 0x00000, /* Device Control - RW */ - E1000_STATUS = 0x00008, /* Device Status - RO */ - E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ - E1000_EERD = 0x00014, /* EEPROM Read - RW */ - E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ - E1000_FLA = 0x0001C, /* Flash Access - RW */ - E1000_MDIC = 0x00020, /* MDI Control - RW */ - E1000_SCTL = 0x00024, /* SerDes Control - RW */ - E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ - E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ - E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ - E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ - E1000_FCT = 0x00030, /* Flow Control Type - RW */ - E1000_VET = 0x00038, /* VLAN Ether Type - RW */ - E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */ - E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ - E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ - E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ - E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ - E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ - E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */ - E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ - E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */ - E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */ -#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2)) - E1000_RCTL = 0x00100, /* Rx Control - RW */ - E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ - E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ - E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ - E1000_TCTL = 0x00400, /* Tx Control - RW */ - E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ - E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ - E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ - E1000_LEDCTL = 0x00E00, /* LED Control - RW */ - E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ - E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ - E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ -#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ - E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ - E1000_PBS = 0x01008, /* Packet Buffer Size */ - E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */ - E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ - E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ - E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ - E1000_PBA_ECC = 0x01100, /* PBA ECC Register */ - E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ - E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ - E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ - E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ -/* Convenience macros - * - * Note: "_n" is the queue number of the register to be written to. 
- * - * Example usage: - * E1000_RDBAL(current_rx_queue) - */ - E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */ -#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8)) - E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */ -#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8)) - E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */ -#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8)) - E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */ -#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8)) - E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */ -#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8)) - E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ - E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ -#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) - E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ - - E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ - E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */ -#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8)) - E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */ -#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8)) - E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */ -#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8)) - E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */ -#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8)) - E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */ -#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8)) - E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ - E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ -#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) - E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ - E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */ -#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8)) - E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ - E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ - E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ - E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */ - E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */ - E1000_SCC = 0x04014, /* Single Collision Count - R/clr */ - E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */ - E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */ - E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ - E1000_COLC = 0x04028, /* Collision Count - R/clr */ - E1000_DC = 0x04030, /* Defer Count - R/clr */ - E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */ - E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ - E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ - E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ - E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */ - E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */ - E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */ - E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */ - E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */ - E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */ - E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */ - E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */ - E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */ - E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */ - E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */ - E1000_GPRC = 0x04074, /* Good Packets Rx Count - 
R/clr */ - E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */ - E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */ - E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */ - E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */ - E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */ - E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */ - E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */ - E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */ - E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */ - E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */ - E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */ - E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ - E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ - E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ - E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ - E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ - E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ - E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */ - E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ - E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ - E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ - E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ - E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ - E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ - E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ - E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ - E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ - E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ - E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ - E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ - E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ - E1000_IAC = 0x04100, /* Interrupt Assertion Count */ - E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ - E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ - E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ - E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ - E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ - E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ - E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ - E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ - E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ - E1000_RFCTL = 0x05008, /* Receive Filter Control */ - E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ - E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */ -#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8)) -#define E1000_RA (E1000_RAL(0)) - E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ -#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) - E1000_SHRAL_PCH_LPT_BASE = 0x05408, -#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8)) - E1000_SHRAH_PCH_LTP_BASE = 0x0540C, -#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8)) - E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */ -#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8)) - E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */ -#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8)) - E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ - E1000_WUC = 
0x05800, /* Wakeup Control - RW */ - E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ - E1000_WUS = 0x05810, /* Wakeup Status - RO */ - E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */ - E1000_MANC = 0x05820, /* Management Control - RW */ - E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ - E1000_HOST_IF = 0x08800, /* Host Interface */ - - E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ - E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ - E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */ -#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4)) - E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ - E1000_GCR = 0x05B00, /* PCI-Ex Control */ - E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ - E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ - E1000_SWSM = 0x05B50, /* SW Semaphore */ - E1000_FWSM = 0x05B54, /* FW Semaphore */ - E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ - E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */ -#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4)) - E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */ -#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4)) - E1000_FFLT_DBG = 0x05F04, /* Debug Register */ - E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ -#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) -#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE - E1000_HICR = 0x08F00, /* Host Interface Control */ -}; - -#define E1000_MAX_PHY_ADDR 4 - -/* IGP01E1000 Specific Registers */ -#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ -#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ -#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ -#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ -#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ -#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ -#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ -#define IGP_PAGE_SHIFT 5 -#define PHY_REG_MASK 0x1F - -#define BM_WUC_PAGE 800 -#define BM_WUC_ADDRESS_OPCODE 0x11 -#define BM_WUC_DATA_OPCODE 0x12 -#define BM_WUC_ENABLE_PAGE 769 -#define BM_WUC_ENABLE_REG 17 -#define BM_WUC_ENABLE_BIT (1 << 2) -#define BM_WUC_HOST_WU_BIT (1 << 4) -#define BM_WUC_ME_WU_BIT (1 << 5) - -#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) -#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) -#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) - -#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 -#define IGP01E1000_PHY_POLARITY_MASK 0x0078 - -#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 -#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ - -#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 - -#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ -#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ -#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ - -#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 - -#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 -#define IGP01E1000_PSSR_MDIX 0x0800 -#define IGP01E1000_PSSR_SPEED_MASK 0xC000 -#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 - -#define IGP02E1000_PHY_CHANNEL_NUM 4 -#define IGP02E1000_PHY_AGC_A 0x11B1 -#define IGP02E1000_PHY_AGC_B 0x12B1 -#define IGP02E1000_PHY_AGC_C 0x14B1 -#define IGP02E1000_PHY_AGC_D 0x18B1 - -#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ -#define IGP02E1000_AGC_LENGTH_MASK 0x7F -#define IGP02E1000_AGC_RANGE 15 - -/* manage.c */ -#define E1000_VFTA_ENTRY_SHIFT 5 -#define E1000_VFTA_ENTRY_MASK 0x7F -#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 
0x1F - -#define E1000_HICR_EN 0x01 /* Enable bit - RO */ -/* Driver sets this bit when done to put command in RAM */ -#define E1000_HICR_C 0x02 -#define E1000_HICR_FW_RESET_ENABLE 0x40 -#define E1000_HICR_FW_RESET 0x80 - -#define E1000_FWSM_MODE_MASK 0xE -#define E1000_FWSM_MODE_SHIFT 1 - -#define E1000_MNG_IAMT_MODE 0x3 -#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 -#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 -#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 -#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 -#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 -#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 - -/* nvm.c */ -#define E1000_STM_OPCODE 0xDB00 - -#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 -#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 -#define E1000_KMRNCTRLSTA_REN 0x00200000 -#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ -#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ -#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ -#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ -#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ -#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ -#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 -#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 -#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ - -#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 -#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ -#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ -#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ - -/* IFE PHY Extended Status Control */ -#define IFE_PESC_POLARITY_REVERSED 0x0100 - -/* IFE PHY Special Control */ -#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 -#define IFE_PSC_FORCE_POLARITY 0x0020 - -/* IFE PHY Special Control and LED Control */ -#define IFE_PSCL_PROBE_MODE 0x0020 -#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ -#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ - -/* IFE PHY MDIX Control */ -#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ -#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ -#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ - -#define E1000_CABLE_LENGTH_UNDEFINED 0xFF +struct e1000_hw; #define E1000_DEV_ID_82571EB_COPPER 0x105E #define E1000_DEV_ID_82571EB_FIBER 0x105F @@ -374,13 +52,11 @@ enum e1e_registers { #define E1000_DEV_ID_82573L 0x109A #define E1000_DEV_ID_82574L 0x10D3 #define E1000_DEV_ID_82574LA 0x10F6 -#define E1000_DEV_ID_82583V 0x150C - +#define E1000_DEV_ID_82583V 0x150C #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB - #define E1000_DEV_ID_ICH8_82567V_3 0x1501 #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A @@ -415,12 +91,12 @@ enum e1e_registers { #define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A #define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 -#define E1000_REVISION_4 4 +#define E1000_REVISION_4 4 -#define E1000_FUNC_1 1 +#define E1000_FUNC_1 1 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 enum e1000_mac_type { e1000_82571, @@ -525,16 +201,6 @@ enum e1000_serdes_link_state { e1000_serdes_link_forced_up }; -/* Receive Descriptor */ -struct e1000_rx_desc { - __le64 
buffer_addr; /* Address of the descriptor's data buffer */ - __le16 length; /* Length of data DMAed into data buffer */ - __le16 csum; /* Packet checksum */ - u8 status; /* Descriptor status */ - u8 errors; /* Descriptor Errors */ - __le16 special; -}; - /* Receive Descriptor - Extended */ union e1000_rx_desc_extended { struct { @@ -657,7 +323,7 @@ struct e1000_data_desc { struct { u8 status; /* Descriptor status */ u8 popts; /* Packet Options */ - __le16 special; /* */ + __le16 special; } fields; } upper; }; @@ -753,7 +419,7 @@ struct e1000_host_command_header { u8 checksum; }; -#define E1000_HI_MAX_DATA_LENGTH 252 +#define E1000_HI_MAX_DATA_LENGTH 252 struct e1000_host_command_info { struct e1000_host_command_header command_header; u8 command_data[E1000_HI_MAX_DATA_LENGTH]; @@ -768,13 +434,18 @@ struct e1000_host_mng_command_header { u16 command_length; }; -#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 struct e1000_host_mng_command_info { struct e1000_host_mng_command_header command_header; u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; }; -/* Function pointers and static data for the MAC. */ +#include "mac.h" +#include "phy.h" +#include "nvm.h" +#include "manage.h" + +/* Function pointers for the MAC. */ struct e1000_mac_operations { s32 (*id_led_init)(struct e1000_hw *); s32 (*blink_led)(struct e1000_hw *); @@ -1003,4 +674,8 @@ struct e1000_hw { } dev_spec; }; +#include "82571.h" +#include "80003es2lan.h" +#include "ich8lan.h" + #endif diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 24d9f61956f0..dff7bff8b8e0 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
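Most of what the hw.h hunk above removes is the flat e1e_registers enum (now kept in regs.h) together with its per-queue convenience macros: descriptor-ring registers such as RDBAL/TDBAL sit at a fixed 0x100 stride per queue (_n << 8), while the RAL/RAH receive-address pairs step by 8 bytes per entry. The standalone sketch below reproduces that offset arithmetic using base values copied from the removed enum; it illustrates the addressing scheme only and is not driver code.

#include <stdio.h>

#define E1000_RDBAL_BASE        0x02800 /* Rx Descriptor Base Address Low */
#define E1000_TDBAL_BASE        0x03800 /* Tx Descriptor Base Address Low */
#define E1000_RAL_BASE          0x05400 /* Receive Address Low */
#define E1000_RAH_BASE          0x05404 /* Receive Address High */

#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + ((_n) << 8))
#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + ((_n) << 8))
#define E1000_RAL(_n)   (E1000_RAL_BASE + ((_n) * 8))
#define E1000_RAH(_n)   (E1000_RAH_BASE + ((_n) * 8))

int main(void)
{
        int n;

        /* one descriptor ring per queue, 0x100 apart */
        for (n = 0; n < 2; n++)
                printf("queue %d: RDBAL 0x%05X  TDBAL 0x%05X\n",
                       n, E1000_RDBAL(n), E1000_TDBAL(n));

        /* receive-address entries, low/high pair every 8 bytes */
        for (n = 0; n < 3; n++)
                printf("RAR[%d]: RAL 0x%05X  RAH 0x%05X\n",
                       n, E1000_RAL(n), E1000_RAH(n));
        return 0;
}

This layout is also why the register-test hunk in ethtool.c above indexes the RA array with ((i << 1) + 1) and reports the failing address as reg + (offset << 2): consecutive 32-bit array elements land on the low and high halves of each receive-address pair.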
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -57,147 +57,6 @@ #include "e1000.h" -#define ICH_FLASH_GFPREG 0x0000 -#define ICH_FLASH_HSFSTS 0x0004 -#define ICH_FLASH_HSFCTL 0x0006 -#define ICH_FLASH_FADDR 0x0008 -#define ICH_FLASH_FDATA0 0x0010 -#define ICH_FLASH_PR0 0x0074 - -#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 -#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 -#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 -#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF -#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 - -#define ICH_CYCLE_READ 0 -#define ICH_CYCLE_WRITE 2 -#define ICH_CYCLE_ERASE 3 - -#define FLASH_GFPREG_BASE_MASK 0x1FFF -#define FLASH_SECTOR_ADDR_SHIFT 12 - -#define ICH_FLASH_SEG_SIZE_256 256 -#define ICH_FLASH_SEG_SIZE_4K 4096 -#define ICH_FLASH_SEG_SIZE_8K 8192 -#define ICH_FLASH_SEG_SIZE_64K 65536 - - -#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ -/* FW established a valid mode */ -#define E1000_ICH_FWSM_FW_VALID 0x00008000 - -#define E1000_ICH_MNG_IAMT_MODE 0x2 - -#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_DEF1_OFF2 << 8) | \ - (ID_LED_DEF1_ON2 << 4) | \ - (ID_LED_DEF1_DEF2)) - -#define E1000_ICH_NVM_SIG_WORD 0x13 -#define E1000_ICH_NVM_SIG_MASK 0xC000 -#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 -#define E1000_ICH_NVM_SIG_VALUE 0x80 - -#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 - -#define E1000_FEXTNVM_SW_CONFIG 1 -#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ - -#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 -#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 - -#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 -#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 -#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 - -#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL - -#define E1000_ICH_RAR_ENTRIES 7 -#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ -#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ - -#define PHY_PAGE_SHIFT 5 -#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ - ((reg) & MAX_PHY_REG_ADDRESS)) -#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ -#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ - -#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 -#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 -#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 - -#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ - -#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ - -/* SMBus Control Phy Register */ -#define CV_SMB_CTRL PHY_REG(769, 23) -#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 - -/* SMBus Address Phy Register */ -#define HV_SMB_ADDR PHY_REG(768, 26) -#define HV_SMB_ADDR_MASK 0x007F -#define HV_SMB_ADDR_PEC_EN 0x0200 -#define HV_SMB_ADDR_VALID 0x0080 -#define HV_SMB_ADDR_FREQ_MASK 0x1100 -#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 -#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 - -/* PHY Power Management Control */ -#define HV_PM_CTRL PHY_REG(770, 17) -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 - -/* PHY Low Power Idle Control */ -#define I82579_LPI_CTRL PHY_REG(772, 20) -#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 -#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 - -/* EMI Registers */ -#define I82579_EMI_ADDR 0x10 -#define I82579_EMI_DATA 0x11 -#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ -#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */ -#define I82579_MSE_LINK_DOWN 0x2411 
/* MSE count before dropping link */ -#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ -#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ -#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ - -/* Intel Rapid Start Technology Support */ -#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) -#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 -#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) -#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 -#define I217_CGFREG PHY_REG(772, 29) -#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 -#define I217_MEMPWR PHY_REG(772, 26) -#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 - -/* Strapping Option Register - RO */ -#define E1000_STRAP 0x0000C -#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 -#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 -#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 -#define E1000_STRAP_SMT_FREQ_SHIFT 12 - -/* OEM Bits Phy Register */ -#define HV_OEM_BITS PHY_REG(768, 25) -#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ -#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ -#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ - -#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ -#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ - -/* KMRN Mode Control */ -#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) -#define HV_KMRN_MDIO_SLOW 0x0400 - -/* KMRN FIFO Control and Status */ -#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) -#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 -#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 - /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { @@ -252,7 +111,6 @@ union ich8_flash_protected_range { u32 regval; }; -static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); @@ -264,9 +122,7 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data); -static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); -static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); @@ -278,7 +134,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw); static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); -static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); @@ -330,12 +186,12 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) u16 retry_count; for (retry_count = 0; retry_count < 2; retry_count++) { - ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg); + ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) continue; phy_id = (u32)(phy_reg << 16); - 
ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg); + ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) { phy_id = 0; continue; @@ -378,10 +234,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) s32 ret_val; u16 phy_reg; + /* Gate automatic PHY configuration by hardware on managed and + * non-managed 82579 and newer adapters. + */ + e1000_gate_hw_phy_config_ich8lan(hw, true); + ret_val = hw->phy.ops.acquire(hw); if (ret_val) { e_dbg("Failed to initialize PHY flow\n"); - return ret_val; + goto out; } /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is @@ -402,13 +263,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) /* fall-through */ case e1000_pch2lan: - /* Gate automatic PHY configuration by hardware on - * non-managed 82579 - */ - if ((hw->mac.type == e1000_pch2lan) && - !(fwsm & E1000_ICH_FWSM_FW_VALID)) - e1000_gate_hw_phy_config_ich8lan(hw, true); - if (e1000_phy_is_accessible_pchlan(hw)) { if (hw->mac.type == e1000_pch_lpt) { /* Unforce SMBus mode in PHY */ @@ -443,6 +297,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; ew32(FEXTNVM3, mac_reg); + if (hw->mac.type == e1000_pch_lpt) { + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * So ensure that the MAC is also out of SMBus mode + */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + /* Toggle LANPHYPC Value bit */ mac_reg = er32(CTRL); mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; @@ -476,6 +339,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ ret_val = e1000e_phy_hw_reset_generic(hw); +out: /* Ungate automatic PHY configuration on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(fwsm & E1000_ICH_FWSM_FW_VALID)) { @@ -495,7 +359,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; + s32 ret_val; phy->addr = 1; phy->reset_delay_us = 100; @@ -778,68 +642,143 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) if (mac->type == e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); - /* Gate automatic PHY configuration by hardware on managed - * 82579 and i217 - */ - if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) && - (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) - e1000_gate_hw_phy_config_ich8lan(hw, true); - return 0; } /** + * __e1000_access_emi_reg_locked - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + * + * This helper function assumes the SW/FW/HW Semaphore is already acquired. + **/ +static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data); + else + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg_locked - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. 
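The __e1000_access_emi_reg_locked/e1000_read_emi_reg_locked helpers introduced here collapse the repeated "write I82579_EMI_ADDR, then touch I82579_EMI_DATA" sequences that later hunks delete into a single address/data indirection. Below is a standalone sketch of that pattern against a mock register file; it is an illustration only, not driver code. The EMI offset 0x084F (I82579_MSE_THRESHOLD) and the value 0x0034 come from this patch; the rest is invented for the demo.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define EMI_ADDR_REG    0x10    /* I82579_EMI_ADDR in the removed defines */
#define EMI_DATA_REG    0x11    /* I82579_EMI_DATA in the removed defines */

static uint16_t phy_regs[32];           /* mock directly addressed PHY regs */
static uint16_t emi_space[0x10000];     /* mock EMI-addressed register space */

static void wphy(uint8_t reg, uint16_t val)
{
        phy_regs[reg] = val;
        if (reg == EMI_DATA_REG)        /* data write lands at latched address */
                emi_space[phy_regs[EMI_ADDR_REG]] = val;
}

static uint16_t rphy(uint8_t reg)
{
        if (reg == EMI_DATA_REG)        /* data read comes from latched address */
                return emi_space[phy_regs[EMI_ADDR_REG]];
        return phy_regs[reg];
}

/* mirrors the shape of __e1000_access_emi_reg_locked() above */
static void access_emi_reg(uint16_t address, uint16_t *data, bool read)
{
        wphy(EMI_ADDR_REG, address);
        if (read)
                *data = rphy(EMI_DATA_REG);
        else
                wphy(EMI_DATA_REG, *data);
}

int main(void)
{
        uint16_t mse = 0x0034;          /* MSE threshold value used in the patch */
        uint16_t readback = 0;

        access_emi_reg(0x084F, &mse, false);    /* I82579_MSE_THRESHOLD */
        access_emi_reg(0x084F, &readback, true);
        printf("EMI[0x084F] = 0x%04X\n", readback);
        return 0;
}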
+ **/ +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __e1000_access_emi_reg_locked(hw, addr, data, true); +} + +/** + * e1000_write_emi_reg_locked - Write Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be written to the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +{ + return __e1000_access_emi_reg_locked(hw, addr, &data, false); +} + +/** * e1000_set_eee_pchlan - Enable/disable EEE support * @hw: pointer to the HW structure * - * Enable/disable EEE based on setting in dev_spec structure. The bits in - * the LPI Control register will remain set only if/when link is up. + * Enable/disable EEE based on setting in dev_spec structure, the duplex of + * the link and the EEE capabilities of the link partner. The LPI Control + * register bits will remain set only if/when link is up. **/ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - s32 ret_val = 0; - u16 phy_reg; + s32 ret_val; + u16 lpi_ctrl; if ((hw->phy.type != e1000_phy_82579) && (hw->phy.type != e1000_phy_i217)) return 0; - ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - if (dev_spec->eee_disable) - phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; - else - phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; - - ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); if (ret_val) - return ret_val; + goto release; + + /* Clear bits that enable EEE in various speeds */ + lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; + + /* Enable EEE if not disabled by user */ + if (!dev_spec->eee_disable) { + u16 lpa, pcs_status, data; - if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) { /* Save off link partner's EEE ability */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, - I217_EEE_LP_ABILITY); + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + break; + default: + ret_val = -E1000_ERR_PHY; + goto release; + } + ret_val = e1000_read_emi_reg_locked(hw, lpa, + &dev_spec->eee_lp_ability); if (ret_val) goto release; - e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability); - /* EEE is not supported in 100Half, so ignore partner's EEE - * in 100 ability if full-duplex is not advertised. + /* Enable EEE only for speeds in which the link partner is + * EEE capable. */ - e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg); - if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS)) - dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED; -release: - hw->phy.ops.release(hw); + if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + e1e_rphy_locked(hw, MII_LPA, &data); + if (data & LPA_100FULL) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + else + /* EEE is not supported in 100Half, so ignore + * partner's EEE in 100 ability if full-duplex + * is not advertised. 
+ */ + dev_spec->eee_lp_ability &= + ~I82579_EEE_100_SUPPORTED; + } + + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; } - return 0; + ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); +release: + hw->phy.ops.release(hw); + + return ret_val; } /** @@ -1017,7 +956,7 @@ static DEFINE_MUTEX(nvm_mutex); * * Acquires the mutex for performing NVM operations. **/ -static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw) { mutex_lock(&nvm_mutex); @@ -1030,7 +969,7 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) * * Releases the mutex used while performing NVM operations. **/ -static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw) { mutex_unlock(&nvm_mutex); } @@ -1322,7 +1261,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) u32 strap = er32(STRAP); u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> E1000_STRAP_SMT_FREQ_SHIFT; - s32 ret_val = 0; + s32 ret_val; strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; @@ -1558,7 +1497,7 @@ release: **/ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) { - s32 ret_val = 0; + s32 ret_val; u32 ctrl_reg = 0; u32 ctrl_ext = 0; u32 reg = 0; @@ -1727,7 +1666,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) */ if (hw->phy.revision < 2) { e1000e_phy_sw_reset(hw); - ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140); + ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); } } @@ -1757,6 +1696,11 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); + if (ret_val) + goto release; + + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); release: hw->phy.ops.release(hw); @@ -1983,22 +1927,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) /* Set MDIO slow mode before any other MDIO access */ ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD); - if (ret_val) - goto release; /* set MSE higher to enable link to stay up when noise is high */ - ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034); - if (ret_val) - goto release; - ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN); + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); if (ret_val) goto release; /* drop link after 5 times MSE threshold was reached */ - ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005); + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); release: hw->phy.ops.release(hw); @@ -2172,10 +2112,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, - I82579_LPI_UPDATE_TIMER); - if (!ret_val) - ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387); + ret_val = e1000_write_emi_reg_locked(hw, + I82579_LPI_UPDATE_TIMER, + 0x1387); hw->phy.ops.release(hw); } @@ -2219,7 +2158,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) **/ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) { - s32 ret_val = 0; + s32 ret_val; u16 
oem_reg; ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); @@ -2277,6 +2216,8 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) @@ -2949,19 +2890,32 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 data; + u16 word; + u16 valid_csum_mask; - /* Read 0x19 and check bit 6. If this bit is 0, the checksum - * needs to be fixed. This bit is an indication that the NVM - * was prepared by OEM software and did not calculate the - * checksum...a likely scenario. + /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, + * the checksum needs to be fixed. This bit is an indication that + * the NVM was prepared by OEM software and did not calculate + * the checksum...a likely scenario. */ - ret_val = e1000_read_nvm(hw, 0x19, 1, &data); + switch (hw->mac.type) { + case e1000_pch_lpt: + word = NVM_COMPAT; + valid_csum_mask = NVM_COMPAT_VALID_CSUM; + break; + default: + word = NVM_FUTURE_INIT_WORD1; + valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; + break; + } + + ret_val = e1000_read_nvm(hw, word, 1, &data); if (ret_val) return ret_val; - if (!(data & 0x40)) { - data |= 0x40; - ret_val = e1000_write_nvm(hw, 0x19, 1, &data); + if (!(data & valid_csum_mask)) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); if (ret_val) return ret_val; ret_val = e1000e_update_nvm_checksum(hw); @@ -3975,8 +3929,7 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) if (ret_val) return; reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; - ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - reg_data); + e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); } /** @@ -4011,19 +3964,20 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) if (!dev_spec->eee_disable) { u16 eee_advert; - ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, - I217_EEE_ADVERTISEMENT); + ret_val = + e1000_read_emi_reg_locked(hw, + I217_EEE_ADVERTISEMENT, + &eee_advert); if (ret_val) goto release; - e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert); /* Disable LPLU if both link partners support 100BaseT * EEE and 100Full is advertised on both ends of the * link. */ - if ((eee_advert & I217_EEE_100_SUPPORTED) && + if ((eee_advert & I82579_EEE_100_SUPPORTED) && (dev_spec->eee_lp_ability & - I217_EEE_100_SUPPORTED) && + I82579_EEE_100_SUPPORTED) && (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU); @@ -4037,7 +3991,6 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * The SMBus release must also be disabled on LCD reset. */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { - /* Enable proxy to reset only on power good. 
*/ e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; @@ -4298,7 +4251,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) u32 bank = 0; u32 status; - e1000e_get_cfg_done(hw); + e1000e_get_cfg_done_generic(hw); /* Wait for indication from h/w that it has completed basic config */ if (hw->mac.type >= e1000_ich10lan) { @@ -4427,7 +4380,7 @@ static const struct e1000_mac_operations ich8_mac_ops = { .reset_hw = e1000_reset_hw_ich8lan, .init_hw = e1000_init_hw_ich8lan, .setup_link = e1000_setup_link_ich8lan, - .setup_physical_interface= e1000_setup_copper_link_ich8lan, + .setup_physical_interface = e1000_setup_copper_link_ich8lan, /* id_led_init dependent on mac type */ .config_collision_dist = e1000e_config_collision_dist_generic, .rar_set = e1000e_rar_set_generic, @@ -4449,7 +4402,7 @@ static const struct e1000_phy_operations ich8_phy_ops = { static const struct e1000_nvm_operations ich8_nvm_ops = { .acquire = e1000_acquire_nvm_ich8lan, - .read = e1000_read_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, .release = e1000_release_nvm_ich8lan, .reload = e1000e_reload_nvm_generic, .update = e1000_update_nvm_checksum_ich8lan, @@ -4531,6 +4484,7 @@ const struct e1000_info e1000_pch2_info = { .mac = e1000_pch2lan, .flags = FLAG_IS_ICH | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH @@ -4539,7 +4493,7 @@ const struct e1000_info e1000_pch2_info = { .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, - .max_hw_frame_size = DEFAULT_JUMBO, + .max_hw_frame_size = 9018, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, @@ -4550,6 +4504,7 @@ const struct e1000_info e1000_pch_lpt_info = { .mac = e1000_pch_lpt, .flags = FLAG_IS_ICH | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH @@ -4558,7 +4513,7 @@ const struct e1000_info e1000_pch_lpt_info = { .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, - .max_hw_frame_size = DEFAULT_JUMBO, + .max_hw_frame_size = 9018, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h new file mode 100644 index 000000000000..b6d3174d7d2d --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -0,0 +1,268 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_ICH8LAN_H_ +#define _E1000E_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 + +/* Requires up to 10 seconds when MNG might be accessing part. */ +#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 + +/* Shared Receive Address Registers */ +#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) +#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_OFF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ + +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* 
Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 +#define HV_STATS_PAGE 778 +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_100_ENABLE 0x2000 +#define I82579_LPI_CTRL_1000_ENABLE 0x4000 +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 + +/* Extended Management Interface (EMI) Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ +#define 
I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ +#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ +#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ +#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ + +#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ +#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 + +/* Receive Address Initial CRC Calculation */ +#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) + +/* Latency Tolerance Reporting */ +#define E1000_LTRV 0x000F8 +#define E1000_LTRV_SCALE_MAX 5 +#define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_REQ_SHIFT 15 +#define E1000_LTRV_NOSNOOP_SHIFT 16 +#define E1000_LTRV_SEND (1 << 30) + +/* Proprietary Latency Tolerance Reporting PCI Capability */ +#define E1000_PCI_LTR_CAP_LPT 0xA8 + +/* OBFF Control & Threshold Defines */ +#define E1000_SVCR_OFF_EN 0x00000001 +#define E1000_SVCR_OFF_MASKINT 0x00001000 +#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000 +#define E1000_SVCR_OFF_TIMER_SHIFT 16 +#define E1000_SVT_OFF_HWM_MASK 0x0000001F + +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); +#endif /* _E1000E_ICH8LAN_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 54d9dafaf126..b78e02174601 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
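Illustrative aside, not part of the patch: the e1000_read_emi_reg_locked()/e1000_write_emi_reg_locked() helpers declared at the end of ich8lan.h replace the open-coded I82579_EMI_ADDR/I82579_EMI_DATA register pairs that the ich8lan.c hunks above remove. The self-contained sketch below models that address/data indirection; phy_write(), phy_read(), the toy register arrays and main() are invented stand-ins for the locked PHY accessors, so read it as a sketch of the access pattern rather than as driver code.

#include <stdint.h>
#include <stdio.h>

#define EMI_ADDR 0x10                   /* mirrors I82579_EMI_ADDR */
#define EMI_DATA 0x11                   /* mirrors I82579_EMI_DATA */

static uint16_t phy_regs[32];           /* toy PHY register file */
static uint16_t emi_space[0x10000];     /* toy EMI address space */

static void phy_write(uint16_t reg, uint16_t val)
{
        phy_regs[reg] = val;
        if (reg == EMI_DATA)            /* data write lands at the latched address */
                emi_space[phy_regs[EMI_ADDR]] = val;
}

static uint16_t phy_read(uint16_t reg)
{
        if (reg == EMI_DATA)            /* data read comes from the latched address */
                return emi_space[phy_regs[EMI_ADDR]];
        return phy_regs[reg];
}

/* Shape of the new helpers: latch the EMI address, then move the data. */
static void write_emi_reg(uint16_t addr, uint16_t data)
{
        phy_write(EMI_ADDR, addr);
        phy_write(EMI_DATA, data);
}

static uint16_t read_emi_reg(uint16_t addr)
{
        phy_write(EMI_ADDR, addr);
        return phy_read(EMI_DATA);
}

int main(void)
{
        write_emi_reg(0x084F, 0x0034);  /* e.g. the I82579_MSE_THRESHOLD write */
        printf("MSE threshold = 0x%04x\n", read_emi_reg(0x084F));
        return 0;
}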
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -165,7 +165,7 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) { u32 i; - s32 ret_val = 0; + s32 ret_val; u16 offset, nvm_alt_mac_addr_offset, nvm_data; u8 alt_mac_addr[ETH_ALEN]; @@ -1021,6 +1021,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; u16 speed, duplex; @@ -1052,14 +1053,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * has completed. We read this twice because this reg has * some "sticky" (latched) bits. */ - ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); if (ret_val) return ret_val; - ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); if (ret_val) return ret_val; - if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) { e_dbg("Copper PHY and Auto Neg has not completed.\n"); return ret_val; } @@ -1070,11 +1071,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * Page Ability Register (Address 5) to determine how * flow control was negotiated. */ - ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg); if (ret_val) return ret_val; - ret_val = - e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); + ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg); if (ret_val) return ret_val; @@ -1111,8 +1111,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * 1 | DC | 1 | DC | E1000_fc_full * */ - if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) { /* Now we need to check if the user selected Rx ONLY * of pause frames. 
In this case, we had to advertise * FULL flow control because we could not advertise Rx @@ -1134,10 +1134,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) *-------|---------|-------|---------|-------------------- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause */ - else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { hw->fc.current_mode = e1000_fc_tx_pause; e_dbg("Flow Control = Tx PAUSE frames only.\n"); } @@ -1148,10 +1148,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) *-------|---------|-------|---------|-------------------- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause */ - else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = Rx PAUSE frames only.\n"); } else { @@ -1185,6 +1185,130 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) } } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = er32(PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + e_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = er32(PCS_ANADV); + pcs_lp_ability_reg = er32(PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = er32(PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + ew32(PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + return 0; } @@ -1231,8 +1355,8 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, * Sets the speed and duplex to gigabit full duplex (the only possible option) * for fiber/serdes links. **/ -s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, - u16 *duplex) +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused + *hw, u16 *speed, u16 *duplex) { *speed = SPEED_1000; *duplex = FULL_DUPLEX; diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h new file mode 100644 index 000000000000..a61fee404ebe --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -0,0 +1,74 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
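As an aside (illustration, not from the patch): the PAUSE/ASM_DIR truth table quoted in the new SerDes branch of e1000e_config_fc_after_link_up() above collapses into three conditionals. The self-contained sketch below mirrors that resolution for both the copper (MII_ADVERTISE/MII_LPA) and SerDes (PCS_ANADV/PCS_LPAB) inputs; the enum and function names are invented, and the driver additionally downgrades the symmetric case to Rx-only pause when the user asked for Rx-only flow control.

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* IEEE 802.3 Annex 28B pause resolution, as tabulated in the comment above.
 * loc and lp are the local and link-partner PAUSE and ASM_DIR bits.
 */
static enum fc_mode resolve_fc(int loc_pause, int loc_asm,
                               int lp_pause, int lp_asm)
{
        if (loc_pause && lp_pause)
                return FC_FULL;         /* symmetric pause on both ends */
        if (!loc_pause && loc_asm && lp_pause && lp_asm)
                return FC_TX_PAUSE;     /* we may send PAUSE, partner honours it */
        if (loc_pause && loc_asm && !lp_pause && lp_asm)
                return FC_RX_PAUSE;     /* partner may send PAUSE, we honour it */
        return FC_NONE;
}

int main(void)
{
        static const char * const name[] = {
                "none", "Rx PAUSE only", "Tx PAUSE only", "full"
        };
        int bits;

        for (bits = 0; bits < 16; bits++)
                printf("loc PAUSE=%d ASM=%d  lp PAUSE=%d ASM=%d  -> %s\n",
                       (bits >> 3) & 1, (bits >> 2) & 1,
                       (bits >> 1) & 1, bits & 1,
                       name[resolve_fc((bits >> 3) & 1, (bits >> 2) & 1,
                                       (bits >> 1) & 1, bits & 1)]);
        return 0;
}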
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_MAC_H_ +#define _E1000E_MAC_H_ + +s32 e1000e_blink_led_generic(struct e1000_hw *hw); +s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +s32 e1000e_force_mac_fc(struct e1000_hw *hw); +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000e_id_led_init_generic(struct e1000_hw *hw); +s32 e1000e_led_on_generic(struct e1000_hw *hw); +s32 e1000e_led_off_generic(struct e1000_hw *hw); +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +s32 e1000e_setup_led_generic(struct e1000_hw *hw); +s32 e1000e_setup_link_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); + +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void e1000e_put_hw_semaphore(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000e_reset_adaptive(struct e1000_hw *hw); +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +void e1000e_update_adaptive(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +void e1000e_config_collision_dist_generic(struct e1000_hw *hw); + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index 6dc47beb3adc..e4b0f1ef92f6 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,19 +28,6 @@ #include "e1000.h" -enum e1000_mng_mode { - e1000_mng_mode_none = 0, - e1000_mng_mode_asf, - e1000_mng_mode_pt, - e1000_mng_mode_ipmi, - e1000_mng_mode_host_if_only -}; - -#define E1000_FACTPS_MNGCG 0x20000000 - -/* Intel(R) Active Management Technology signature */ -#define E1000_IAMT_SIGNATURE 0x544D4149 - /** * e1000_calculate_checksum - Calculate checksum for buffer * @buffer: pointer to EEPROM diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h new file mode 100644 index 000000000000..326897c29ea8 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -0,0 +1,72 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_MANAGE_H_ +#define _E1000E_MANAGE_H_ + +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 643c883dd795..a177b8b65c44 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
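Aside, and an assumption rather than something shown in these hunks: e1000_calculate_checksum(), whose kernel-doc appears in the manage.c hunk, is conventionally an 8-bit two's-complement checksum (sum every byte, then return the value that makes the whole block sum to zero), which is how host-interface and DHCP cookie blocks like those described in manage.h are validated. The sketch below illustrates that pattern; calc_checksum() and the sample buffer are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed behaviour: the returned byte makes block-plus-checksum sum to
 * zero modulo 256.
 */
static uint8_t calc_checksum(const uint8_t *buf, uint32_t len)
{
        uint8_t sum = 0;
        uint32_t i;

        for (i = 0; i < len; i++)
                sum += buf[i];
        return (uint8_t)(0 - sum);
}

int main(void)
{
        uint8_t blk[16];
        uint8_t sum, verify = 0;
        unsigned int i;

        memset(blk, 0xA5, sizeof(blk));
        sum = calc_checksum(blk, sizeof(blk));
        for (i = 0; i < sizeof(blk); i++)
                verify += blk[i];
        verify += sum;
        printf("checksum=0x%02x, block+checksum sums to 0x%02x\n", sum, verify);
        return 0;
}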
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -42,7 +42,6 @@ #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> -#include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/cpu.h> @@ -56,7 +55,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION +#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -87,20 +86,7 @@ struct e1000_reg_info { char *name; }; -#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ -#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ -#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ -#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ -#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ - -#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ - static const struct e1000_reg_info e1000_reg_info_tbl[] = { - /* General Registers */ {E1000_CTRL, "CTRL"}, {E1000_STATUS, "STATUS"}, @@ -488,20 +474,87 @@ static int e1000_desc_unused(struct e1000_ring *ring) } /** + * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp + * @adapter: board private structure + * @hwtstamps: time stamp structure to update + * @systim: unsigned 64bit system time value. + * + * Convert the system time value stored in the RX/TXSTMP registers into a + * hwtstamp which can be used by the upper level time stamping functions. + * + * The 'systim_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two 32 bit registers. The first read latches the + * value. + **/ +static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, systim); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +/** + * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp + * @adapter: board private structure + * @status: descriptor extended error and status field + * @skb: particular skb to include time stamp + * + * If the time stamp is valid, convert it into the timecounter ns value + * and store that result into the shhwtstamps structure which is passed + * up the network stack. + **/ +static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rxstmp; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || + !(status & E1000_RXDEXT_STATERR_TST) || + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + /* The Rx time stamp registers contain the time stamp. No other + * received packet will be time stamped until the Rx time stamp + * registers are read. Because only one packet can be time stamped + * at a time, the register values must belong to this packet and + * therefore none of the other additional attributes need to be + * compared. 
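For illustration only: e1000e_systim_to_hwtstamp() above relies on the kernel timecounter, which scales a raw SYSTIM delta by a mult/shift pair and adds it to an accumulated nanosecond base. A minimal model of that conversion follows; struct toy_timecounter, cyc2ns() and the mult/shift numbers are made up for the example (the driver itself uses cc.mult = 1 and chooses cc.shift per clock frequency later in this patch).

#include <stdint.h>
#include <stdio.h>

/* Minimal model of timecounter_cyc2time():
 *     ns = base_ns + ((now - cycle_last) * mult) >> shift
 */
struct toy_timecounter {
        uint64_t cycle_last;    /* raw counter value at the last sync point */
        uint64_t base_ns;       /* nanoseconds accumulated up to cycle_last */
        uint32_t mult;
        uint32_t shift;
};

static uint64_t cyc2ns(const struct toy_timecounter *tc, uint64_t now)
{
        uint64_t delta = now - tc->cycle_last;

        return tc->base_ns + ((delta * tc->mult) >> tc->shift);
}

int main(void)
{
        /* pretend each raw count is worth 8 ns: mult = 64, shift = 3 */
        struct toy_timecounter tc = {
                .cycle_last = 1000, .base_ns = 0, .mult = 64, .shift = 3,
        };

        /* (1125 - 1000) counts * 8 ns = 1000 ns */
        printf("count 1125 -> %llu ns\n", (unsigned long long)cyc2ns(&tc, 1125));
        return 0;
}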
+ */ + rxstmp = (u64)er32(RXSTMPL); + rxstmp |= (u64)er32(RXSTMPH) << 32; + e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp); + + adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; +} + +/** * e1000_receive_skb - helper function to handle Rx indications * @adapter: board private structure - * @status: descriptor status field as written by hardware + * @staterr: descriptor extended error and status field as written by hardware * @vlan: descriptor vlan field as written by hardware (no le/be conversion) * @skb: pointer to sk_buff to be indicated to stack **/ static void e1000_receive_skb(struct e1000_adapter *adapter, struct net_device *netdev, struct sk_buff *skb, - u8 status, __le16 vlan) + u32 staterr, __le16 vlan) { u16 tag = le16_to_cpu(vlan); + + e1000e_rx_hwtstamp(adapter, staterr, skb); + skb->protocol = eth_type_trans(skb, netdev); - if (status & E1000_RXD_STAT_VP) + if (staterr & E1000_RXD_STAT_VP) __vlan_hwaccel_put_tag(skb, tag); napi_gro_receive(&adapter->napi, skb); @@ -765,7 +818,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; - unsigned int bufsz = 256 - 16 /* for skb_reserve */; + unsigned int bufsz = 256 - 16; /* for skb_reserve */ i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; @@ -1050,9 +1103,9 @@ static void e1000_print_hw_hang(struct work_struct *work) adapter->tx_hang_recheck = false; netif_stop_queue(netdev); - e1e_rphy(hw, PHY_STATUS, &phy_status); - e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); - e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); + e1e_rphy(hw, MII_BMSR, &phy_status); + e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); + e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); @@ -1092,6 +1145,41 @@ static void e1000_print_hw_hang(struct work_struct *work) } /** + * e1000e_tx_hwtstamp_work - check for Tx time stamp + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. The timestamp must + * be for this skb because only one such packet is allowed in the queue. 
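As a further illustration (not part of the patch): the timestamp that e1000e_tx_hwtstamp_work(), introduced just below, eventually reports through skb_tstamp_tx() reaches applications on the socket error queue. Assuming hardware timestamping has already been enabled on the interface (see the SIOCSHWTSTAMP sketch further on), a sender can retrieve it roughly as follows; the actual send is elided and error handling is minimal.

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
        char ctrl[256], data[64];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cm;

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                                 &flags, sizeof(flags)) < 0) {
                perror("SO_TIMESTAMPING");
                return 1;
        }

        /* ... connect()/sendto() a packet out of the e1000e port here ... */

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) {      /* Tx stamp comes back here */
                perror("recvmsg(MSG_ERRQUEUE)");
                return 1;
        }
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == SOL_SOCKET &&
                    cm->cmsg_type == SCM_TIMESTAMPING) {
                        struct scm_timestamping *tss =
                                (struct scm_timestamping *)CMSG_DATA(cm);

                        /* ts[2] is the raw hardware timestamp */
                        printf("hw tx stamp: %lld.%09ld\n",
                               (long long)tss->ts[2].tv_sec, tss->ts[2].tv_nsec);
                }
        }
        close(fd);
        return 0;
}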
+ */ +static void e1000e_tx_hwtstamp_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + tx_hwtstamp_work); + struct e1000_hw *hw = &adapter->hw; + + if (!adapter->tx_hwtstamp_skb) + return; + + if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp; + + txstmp = er32(TXSTMPL); + txstmp |= (u64)er32(TXSTMPH) << 32; + + e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); + + skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } else { + /* reschedule to check later */ + schedule_work(&adapter->tx_hwtstamp_work); + } +} + +/** * e1000_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: Tx descriptor ring * @@ -1345,8 +1433,8 @@ copydone: cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) adapter->rx_hdr_split++; - e1000_receive_skb(adapter, netdev, skb, - staterr, rx_desc->wb.middle.vlan); + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.middle.vlan); next_desc: rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); @@ -1645,7 +1733,7 @@ static void e1000e_downshift_workaround(struct work_struct *work) * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t e1000_intr_msi(int irq, void *data) +static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1671,7 +1759,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) /* disable receives */ u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); - adapter->flags |= FLAG_RX_RESTART_NOW; + adapter->flags |= FLAG_RESTART_NOW; } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -1711,7 +1799,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t e1000_intr(int irq, void *data) +static irqreturn_t e1000_intr(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1751,7 +1839,7 @@ static irqreturn_t e1000_intr(int irq, void *data) /* disable receives */ rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); - adapter->flags |= FLAG_RX_RESTART_NOW; + adapter->flags |= FLAG_RESTART_NOW; } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -1786,7 +1874,7 @@ static irqreturn_t e1000_intr(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t e1000_msix_other(int irq, void *data) +static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1818,8 +1906,7 @@ no_link_interrupt: return IRQ_HANDLED; } - -static irqreturn_t e1000_intr_msix_tx(int irq, void *data) +static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1837,7 +1924,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t e1000_intr_msix_rx(int irq, void *data) +static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1924,7 +2011,6 @@ static void 
e1000_configure_msix(struct e1000_adapter *adapter) ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask Other interrupts upon ICR read */ -#define E1000_EIAC_MASK_82574 0x01F00000 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); ctrl_ext |= E1000_CTRL_EXT_EIAME; ew32(CTRL_EXT, ctrl_ext); @@ -2394,9 +2480,7 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring) * while increasing bulk throughput. This functionality is controlled * by the InterruptThrottleRate module parameter. **/ -static unsigned int e1000_update_itr(struct e1000_adapter *adapter, - u16 itr_setting, int packets, - int bytes) +static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) { unsigned int retval = itr_setting; @@ -2441,7 +2525,6 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter, static void e1000_set_itr(struct e1000_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; u16 current_itr; u32 new_itr = adapter->itr; @@ -2457,18 +2540,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter) goto set_itr_now; } - adapter->tx_itr = e1000_update_itr(adapter, - adapter->tx_itr, - adapter->total_tx_packets, - adapter->total_tx_bytes); + adapter->tx_itr = e1000_update_itr(adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) adapter->tx_itr = low_latency; - adapter->rx_itr = e1000_update_itr(adapter, - adapter->rx_itr, - adapter->total_rx_packets, - adapter->total_rx_bytes); + adapter->rx_itr = e1000_update_itr(adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) adapter->rx_itr = low_latency; @@ -2504,10 +2585,7 @@ set_itr_now: if (adapter->msix_entries) adapter->rx_ring->set_itr = 1; else - if (new_itr) - ew32(ITR, 1000000000 / (new_itr * 256)); - else - ew32(ITR, 0); + e1000e_write_itr(adapter, new_itr); } } @@ -3049,7 +3127,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) ew32(RCTL, rctl); /* just started the receive unit, no need to restart */ - adapter->flags &= ~FLAG_RX_RESTART_NOW; + adapter->flags &= ~FLAG_RESTART_NOW; } /** @@ -3144,18 +3222,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) rxcsum &= ~E1000_RXCSUM_TUOFL; ew32(RXCSUM, rxcsum); - if (adapter->hw.mac.type == e1000_pch2lan) { - /* With jumbo frames, excessive C-state transition - * latencies result in dropped transactions. - */ - if (adapter->netdev->mtu > ETH_DATA_LEN) { + /* With jumbo frames, excessive C-state transition latencies result + * in dropped transactions. 
+ */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + u32 lat = + ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - + adapter->max_frame_size) * 8 / 1000; + + if (adapter->flags & FLAG_IS_ICH) { u32 rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl | 0x3); - pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); - } else { - pm_qos_update_request(&adapter->netdev->pm_qos_req, - PM_QOS_DEFAULT_VALUE); } + + pm_qos_update_request(&adapter->netdev->pm_qos_req, lat); + } else { + pm_qos_update_request(&adapter->netdev->pm_qos_req, + PM_QOS_DEFAULT_VALUE); } /* Enable Receives */ @@ -3344,6 +3427,241 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) } /** + * e1000e_get_base_timinca - get default SYSTIM time increment attributes + * @adapter: board private structure + * @timinca: pointer to returned time increment attributes + * + * Get attributes for incrementing the System Time Register SYSTIML/H at + * the default base frequency, and set the cyclecounter shift value. + **/ +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) +{ + struct e1000_hw *hw = &adapter->hw; + u32 incvalue, incperiod, shift; + + /* Make sure clock is enabled on I217 before checking the frequency */ + if ((hw->mac.type == e1000_pch_lpt) && + !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { + u32 fextnvm7 = er32(FEXTNVM7); + + if (!(fextnvm7 & (1 << 0))) { + ew32(FEXTNVM7, fextnvm7 | (1 << 0)); + e1e_flush(); + } + } + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + /* On I217, the clock frequency is 25MHz or 96MHz as + * indicated by the System Clock Frequency Indication + */ + if ((hw->mac.type != e1000_pch_lpt) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + break; + default: + return -EINVAL; + } + + *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | + ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); + + return 0; +} + +/** + * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable + * @adapter: board private structure + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". 
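Illustration, not part of the patch: e1000e_config_hwtstamp(), whose kernel-doc this is, consumes the hwtstamp_config that userspace delivers through the SIOCSHWTSTAMP ioctl added later in this patch. A minimal request for Tx timestamping plus the "all PTP v2 events" Rx filter looks like the sketch below; "eth0" is a placeholder and the ioctl needs CAP_NET_ADMIN.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;                   /* time stamp all Tx packets */
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;   /* maps to TYPE_EVENT_V2 above */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
                perror("SIOCSHWTSTAMP");
                close(fd);
                return 1;
        }

        /* the driver may grant a broader filter than requested */
        printf("rx_filter granted: %d\n", cfg.rx_filter);
        close(fd);
        return 0;
}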
+ **/ +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct hwtstamp_config *config = &adapter->hwtstamp_config; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 rxmtrl = 0; + u16 rxudp = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + s32 ret_val; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return -EINVAL; + + /* flags reserved for future extensions - must be zero */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + /* Also time stamps V2 L2 Path Delay Request/Response */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + /* Also time stamps V2 L2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* Hardware cannot filter just V2 L4 Sync messages; + * fall-through to V2 (both L2 and L4) Sync. + */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* Hardware cannot filter just V2 L4 Delay Request messages; + * fall-through to V2 (both L2 and L4) Delay Request. + */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + /* Hardware cannot filter just V2 L4 or L2 Event messages; + * fall-through to all V2 (both L2 and L4) Events. + */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* For V1, the hardware can only filter Sync messages or + * Delay Request messages but not both so fall-through to + * time stamp all packets. 
+ */ + case HWTSTAMP_FILTER_ALL: + is_l2 = true; + is_l4 = true; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* enable/disable Tx h/w time stamping */ + regval = er32(TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + ew32(TSYNCTXCTL, regval); + if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != + (regval & E1000_TSYNCTXCTL_ENABLED)) { + e_err("Timesync Tx Control register not set as expected\n"); + return -EAGAIN; + } + + /* enable/disable Rx h/w time stamping */ + regval = er32(TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + ew32(TSYNCRXCTL, regval); + if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK)) != + (regval & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK))) { + e_err("Timesync Rx Control register not set as expected\n"); + return -EAGAIN; + } + + /* L2: define ethertype filter for time stamped packets */ + if (is_l2) + rxmtrl |= ETH_P_1588; + + /* define which PTP packets get time stamped */ + ew32(RXMTRL, rxmtrl); + + /* Filter by destination port */ + if (is_l4) { + rxudp = PTP_EV_PORT; + cpu_to_be16s(&rxudp); + } + ew32(RXUDP, rxudp); + + e1e_flush(); + + /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ + er32(RXSTMPH); + er32(TXSTMPH); + + /* Get and set the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, ®val); + if (ret_val) + return ret_val; + ew32(TIMINCA, regval); + + /* reset the ns time counter */ + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + return 0; +} + +/** * e1000_configure - configure the hardware for Rx and Tx * @adapter: private board structure **/ @@ -3509,14 +3827,17 @@ void e1000e_reset(struct e1000_adapter *adapter) break; case e1000_pch2lan: case e1000_pch_lpt: - fc->high_water = 0x05C20; - fc->low_water = 0x05048; - fc->pause_time = 0x0650; fc->refresh_time = 0x0400; - if (adapter->netdev->mtu > ETH_DATA_LEN) { - pba = 14; - ew32(PBA, pba); + + if (adapter->netdev->mtu <= ETH_DATA_LEN) { + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + break; } + + fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; + fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; break; } @@ -3569,6 +3890,9 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); + /* initialize systim and reset the ns time counter */ + e1000e_config_hwtstamp(adapter); + if (!netif_running(adapter->netdev) && !test_bit(__E1000_TESTING, &adapter->state)) { e1000_power_down_phy(adapter); @@ -3705,6 +4029,24 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) } /** + * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) + * @cc: cyclecounter structure + **/ +static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) +{ + struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, + cc); + struct e1000_hw *hw = &adapter->hw; + cycle_t systim; + + /* latch SYSTIMH on read of SYSTIML */ + systim = (cycle_t)er32(SYSTIML); + systim |= (cycle_t)er32(SYSTIMH) << 32; + + return systim; +} + +/** * e1000_sw_init - Initialize general software structures (struct e1000_adapter) * @adapter: board private structure to initialize * @@ -3730,6 +4072,17 @@ static int e1000_sw_init(struct e1000_adapter *adapter) if (e1000_alloc_queues(adapter)) return -ENOMEM; + /* Setup hardware time stamping 
cyclecounter */ + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + adapter->cc.read = e1000e_cyclecounter_read; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + /* cc.shift set in e1000e_get_base_tininca() */ + + spin_lock_init(&adapter->systim_lock); + INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); + } + /* Explicitly disable IRQ since the NIC can be in any state. */ e1000_irq_disable(adapter); @@ -3742,7 +4095,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter) * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t e1000_intr_msi_test(int irq, void *data) +static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -3913,10 +4266,8 @@ static int e1000_open(struct net_device *netdev) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ - if (adapter->hw.mac.type == e1000_pch2lan) - pm_qos_add_request(&adapter->netdev->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt @@ -4024,8 +4375,7 @@ static int e1000_close(struct net_device *netdev) !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter); - if (adapter->hw.mac.type == e1000_pch2lan) - pm_qos_remove_request(&adapter->netdev->pm_qos_req); + pm_qos_remove_request(&adapter->netdev->pm_qos_req); pm_runtime_put_sync(&pdev->dev); @@ -4312,14 +4662,14 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); - ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); - ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); - ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); - ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); - ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); - ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); - ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); + ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); + ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); + ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); + ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); + ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); + ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); + ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); } else { @@ -4346,9 +4696,8 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) u32 ctrl = er32(CTRL); /* Link status message must follow this format for user tools */ - printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", - adapter->netdev->name, - adapter->link_speed, + pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : (ctrl & E1000_CTRL_RFCE) ? 
"Rx" : @@ -4401,11 +4750,11 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter) { /* make sure the receive unit is started */ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && - (adapter->flags & FLAG_RX_RESTART_NOW)) { + (adapter->flags & FLAG_RESTART_NOW)) { struct e1000_hw *hw = &adapter->hw; u32 rctl = er32(RCTL); ew32(RCTL, rctl | E1000_RCTL_EN); - adapter->flags &= ~FLAG_RX_RESTART_NOW; + adapter->flags &= ~FLAG_RESTART_NOW; } } @@ -4481,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work) &adapter->link_speed, &adapter->link_duplex); e1000_print_link_info(adapter); + + /* check if SmartSpeed worked */ + e1000e_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, + "Link Speed was downgraded by SmartSpeed\n"); + /* On supported PHYs, check for duplex mismatch only * if link has autonegotiated at 10/100 half */ @@ -4492,9 +4848,9 @@ static void e1000_watchdog_task(struct work_struct *work) (adapter->link_duplex == HALF_DUPLEX)) { u16 autoneg_exp; - e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); + e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); - if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) + if (!(autoneg_exp & EXPANSION_NWAY)) e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); } @@ -4567,15 +4923,22 @@ static void e1000_watchdog_task(struct work_struct *work) adapter->link_speed = 0; adapter->link_duplex = 0; /* Link status message must follow this format */ - printk(KERN_INFO "e1000e: %s NIC Link is Down\n", - adapter->netdev->name); + pr_info("%s NIC Link is Down\n", adapter->netdev->name); netif_carrier_off(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); - if (adapter->flags & FLAG_RX_NEEDS_RESTART) - schedule_work(&adapter->reset_task); + /* The link is lost so the controller stops DMA. + * If there is queued Tx work that cannot be done + * or if on an 8000ES2LAN which requires a Rx packet + * buffer work-around on link down event, reset the + * controller to flush the Tx/Rx packet buffers. + * (Do the reset outside of interrupt context). + */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, LINK_TIMEOUT); @@ -4597,20 +4960,14 @@ link_up: adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); - e1000e_update_adaptive(&adapter->hw); - - if (!netif_carrier_ok(netdev) && - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { - /* We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). 
- */ + if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ return; } + e1000e_update_adaptive(&adapter->hw); + /* Simple mode for Interrupt Throttle Rate (ITR) */ if (adapter->itr_setting == 4) { /* Symmetric Tx/Rx gets a reduced ITR=2000; @@ -4647,6 +5004,17 @@ link_up: if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) e1000e_check_82574_phy_workaround(adapter); + /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ + if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { + er32(RXSTMPH); + adapter->rx_hwtstamp_cleared++; + } else { + adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; + } + } + /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, @@ -4658,6 +5026,7 @@ link_up: #define E1000_TX_FLAGS_TSO 0x00000004 #define E1000_TX_FLAGS_IPV4 0x00000008 #define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_HWTSTAMP 0x00000020 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 @@ -4916,6 +5285,11 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) txd_lower &= ~(E1000_TXD_CMD_IFCS); + if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_EXTCMD_TSTAMP; + } + i = tx_ring->next_to_use; do { @@ -4964,12 +5338,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct e1000_hw *hw = &adapter->hw; u16 length, offset; - if (vlan_tx_tag_present(skb)) { - if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && - (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) - return 0; - } + if (vlan_tx_tag_present(skb) && + !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) return 0; @@ -5140,7 +5513,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, nr_frags); if (count) { - skb_tx_timestamp(skb); + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + !adapter->tx_hwtstamp_skb)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= E1000_TX_FLAGS_HWTSTAMP; + adapter->tx_hwtstamp_skb = skb_get(skb); + schedule_work(&adapter->tx_hwtstamp_work); + } else { + skb_tx_timestamp(skb); + } netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); @@ -5180,10 +5561,9 @@ static void e1000_reset_task(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) return; - if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && - (adapter->flags & FLAG_RX_RESTART_NOW))) { + if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); - e_err("Reset adapter\n"); + e_err("Reset adapter unexpectedly\n"); } e1000e_reinit_locked(adapter); } @@ -5369,6 +5749,61 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, return 0; } +/** + * e1000e_hwtstamp_ioctl - control hardware time stamping + * @netdev: network interface device structure + * @ifreq: interface request + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. 
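Editorial aside: the e1000_xmit_frame() hunk above defers hardware Tx timestamp delivery — it flags at most one outstanding skb with SKBTX_IN_PROGRESS, keeps a reference in adapter->tx_hwtstamp_skb, and schedules tx_hwtstamp_work. Below is a minimal sketch of that deferred-completion pattern, not the literal body of e1000e_tx_hwtstamp_work(); the Tx-side register names (TSYNCTXCTL/TXSTMPL/TXSTMPH) are assumed by analogy with the Rx-side registers used in the watchdog hunk above.

        /* Sketch: complete a deferred Tx hardware timestamp.
         * Assumes the hardware latches the Tx time in TXSTMPL/TXSTMPH once
         * TSYNCTXCTL reports a valid sample, and that adapter->tc was seeded
         * from the cyclecounter set up in e1000_sw_init() above.
         */
        static void tx_hwtstamp_work_sketch(struct work_struct *work)
        {
                struct e1000_adapter *adapter = container_of(work,
                                                             struct e1000_adapter,
                                                             tx_hwtstamp_work);
                struct e1000_hw *hw = &adapter->hw;

                if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
                        struct skb_shared_hwtstamps shhwtstamps;
                        u64 txstmp = er32(TXSTMPL);
                        u64 ns;

                        txstmp |= (u64)er32(TXSTMPH) << 32;
                        ns = timecounter_cyc2time(&adapter->tc, txstmp);

                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        shhwtstamps.hwtstamp = ns_to_ktime(ns);

                        /* hand the timestamp back to the stack, then drop our ref */
                        skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
                        dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
                        adapter->tx_hwtstamp_skb = NULL;
                }
        }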
At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". + **/ +static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int ret_val; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + adapter->hwtstamp_config = config; + + ret_val = e1000e_config_hwtstamp(adapter); + if (ret_val) + return ret_val; + + config = adapter->hwtstamp_config; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* With V2 type filters which specify a Sync or Delay Request, + * Path Delay Request/Response messages are also time stamped + * by hardware so notify the caller the requested packets plus + * some others are time stamped. + */ + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + default: + break; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -5376,6 +5811,8 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: case SIOCSMIIREG: return e1000_mii_ioctl(netdev, ifr, cmd); + case SIOCSHWTSTAMP: + return e1000e_hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; } @@ -5386,7 +5823,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) struct e1000_hw *hw = &adapter->hw; u32 i, mac_reg; u16 phy_reg, wuc_enable; - int retval = 0; + int retval; /* copy MAC RARs to PHY RARs */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); @@ -5600,14 +6037,21 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) #else static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) { + u16 aspm_ctl = 0; + + if (state & PCIE_LINK_STATE_L0S) + aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S; + if (state & PCIE_LINK_STATE_L1) + aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1; + /* Both device and parent should have the same ASPM setting. * Disable ASPM in downstream component first and then upstream. 
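Editorial aside: with the SIOCSHWTSTAMP case wired into e1000_ioctl() above, user space drives it through the standard struct hwtstamp_config request. A minimal self-contained sketch follows; the interface name "eth0" and the chosen Rx filter are only examples.

        /* Enable Tx timestamping and a PTPv2-over-UDP Sync Rx filter. */
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <unistd.h>
        #include <net/if.h>
        #include <linux/sockios.h>
        #include <linux/net_tstamp.h>

        int main(void)
        {
                struct hwtstamp_config cfg = {
                        .tx_type = HWTSTAMP_TX_ON,
                        .rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
                };
                struct ifreq ifr;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                if (fd < 0) {
                        perror("socket");
                        return 1;
                }

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
                ifr.ifr_data = (void *)&cfg;

                if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
                        perror("SIOCSHWTSTAMP");
                        return 1;
                }

                /* the driver may widen the filter; see the HWTSTAMP_FILTER_SOME case above */
                printf("rx_filter granted: %d\n", cfg.rx_filter);
                close(fd);
                return 0;
        }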
*/ - pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl); if (pdev->bus->self) pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, - state); + aspm_ctl); } #endif static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) @@ -5792,7 +6236,7 @@ static void e1000_shutdown(struct pci_dev *pdev) #ifdef CONFIG_NET_POLL_CONTROLLER -static irqreturn_t e1000_intr_msix(int irq, void *data) +static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5956,7 +6400,6 @@ static void e1000_io_resume(struct pci_dev *pdev) */ if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_get_hw_control(adapter); - } static void e1000_print_device_info(struct e1000_adapter *adapter) @@ -6114,8 +6557,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } err = pci_request_selected_regions_exclusive(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - e1000e_driver_name); + pci_select_bars(pdev, IORESOURCE_MEM), + e1000e_driver_name); if (err) goto err_pci_reg; @@ -6274,11 +6717,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "NVM Read Error while reading MAC address\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", - netdev->perm_addr); + netdev->dev_addr); err = -EIO; goto err_eeprom; } @@ -6364,6 +6806,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + /* init PTP hardware clock */ + e1000e_ptp_init(adapter); + e1000_print_device_info(adapter); if (pci_dev_run_wake(pdev)) @@ -6412,6 +6857,8 @@ static void e1000_remove(struct pci_dev *pdev) struct e1000_adapter *adapter = netdev_priv(netdev); bool down = test_bit(__E1000_DOWN, &adapter->state); + e1000e_ptp_remove(adapter); + /* The timers may be rescheduled, so explicitly disable them * from being rescheduled. */ @@ -6426,6 +6873,14 @@ static void e1000_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + cancel_work_sync(&adapter->tx_hwtstamp_work); + if (adapter->tx_hwtstamp_skb) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } + } + if (!(netdev->flags & IFF_UP)) e1000_power_down_phy(adapter); @@ -6578,7 +7033,7 @@ static int __init e1000_init_module(void) int ret; pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index b6468804cb2e..84fecc268162 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -359,7 +359,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val; + s32 ret_val = -E1000_ERR_NVM; u16 widx = 0; /* A check for invalid values: offset too large, too many words, @@ -371,16 +371,18 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) return -E1000_ERR_NVM; } - ret_val = nvm->ops.acquire(hw); - if (ret_val) - return ret_val; - while (widx < words) { u8 write_opcode = NVM_WRITE_OPCODE_SPI; - ret_val = e1000_ready_nvm_eeprom(hw); + ret_val = nvm->ops.acquire(hw); if (ret_val) - goto release; + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } e1000_standby_nvm(hw); @@ -413,12 +415,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) break; } } + usleep_range(10000, 20000); + nvm->ops.release(hw); } - usleep_range(10000, 20000); -release: - nvm->ops.release(hw); - return ret_val; } @@ -464,8 +464,8 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, if (nvm_data != NVM_PBA_PTR_GUARD) { e_dbg("NVM PBA number is not stored as string\n"); - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { e_dbg("PBA string buffer too small\n"); return E1000_ERR_NO_SPACE; } diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h new file mode 100644 index 000000000000..45fc69561627 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -0,0 +1,47 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_NVM_H_ +#define _E1000E_NVM_H_ + +s32 e1000e_acquire_nvm(struct e1000_hw *hw); + +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000e_release_nvm(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 89d536dd7ff5..98da75dff936 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -53,8 +53,7 @@ MODULE_PARM_DESC(copybreak, */ #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } #define E1000_PARAM(X, desc) \ - static int X[E1000_MAX_NIC+1] \ - = E1000_PARAM_INIT; \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ static unsigned int num_##X; \ module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); @@ -447,8 +446,7 @@ void e1000e_check_options(struct e1000_adapter *adapter) if (num_SmartPowerDownEnable > bd) { unsigned int spd = SmartPowerDownEnable[bd]; e1000_validate_option(&spd, &opt, adapter); - if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) - && spd) + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) adapter->flags |= FLAG_SMART_POWER_DOWN; } } diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 28b38ff37e84..0930c136aa31 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,16 +28,12 @@ #include "e1000.h" -static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); -static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); -static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); static s32 e1000_wait_autoneg(struct e1000_hw *hw); -static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read, bool page_set); static u32 e1000_get_phy_addr_for_hv_page(u32 page); static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, - u16 *data, bool read); + u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { @@ -57,48 +53,6 @@ static const u16 e1000_igp_2_cable_length_table[] = { #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_igp_2_cable_length_table) -#define BM_PHY_REG_PAGE(offset) \ - ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) -#define BM_PHY_REG_NUM(offset) \ - ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ - (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ - ~MAX_PHY_REG_ADDRESS))) - -#define HV_INTC_FC_PAGE_START 768 -#define I82578_ADDR_REG 29 -#define I82577_ADDR_REG 16 -#define I82577_CFG_REG 22 -#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ -#define I82577_CTRL_REG 23 - -/* 82577 specific PHY registers */ -#define I82577_PHY_CTRL_2 18 -#define I82577_PHY_STATUS_2 26 -#define I82577_PHY_DIAG_STATUS 31 - -/* I82577 PHY Status 2 */ -#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 -#define I82577_PHY_STATUS2_MDIX 0x0800 -#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 -#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 - -/* I82577 PHY Control 2 */ -#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 -#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 -#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 - -/* I82577 PHY Diagnostics Status */ -#define I82577_DSTATUS_CABLE_LENGTH 0x03FC -#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 - -/* BM PHY Copper Specific Control 1 */ -#define BM_CS_CTRL1 16 - -#define HV_MUX_DATA_CTRL PHY_REG(776, 16) -#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 -#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 - /** * e1000e_check_reset_block_generic - Check if PHY reset is blocked * @hw: pointer to the HW structure @@ -135,13 +89,13 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) return 0; while (retry_count < 2) { - ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); if (ret_val) return ret_val; phy->id = (u32)(phy_id << 16); udelay(20); - ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; @@ -645,31 +599,31 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) u16 phy_data; /* Resolve Master/Slave mode */ - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data); + ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data); if (ret_val) return ret_val; /* load defaults for future use */ - hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? - ((phy_data & CR_1000T_MS_VALUE) ? + hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ? + ((phy_data & CTL1000_AS_MASTER) ? 
e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; switch (hw->phy.ms_type) { case e1000_ms_force_master: - phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); break; case e1000_ms_force_slave: - phy_data |= CR_1000T_MS_ENABLE; - phy_data &= ~(CR_1000T_MS_VALUE); + phy_data |= CTL1000_ENABLE_MASTER; + phy_data &= ~(CTL1000_AS_MASTER); break; case e1000_ms_auto: - phy_data &= ~CR_1000T_MS_ENABLE; + phy_data &= ~CTL1000_ENABLE_MASTER; /* fall-through */ default: break; } - return e1e_wphy(hw, PHY_1000T_CTRL, phy_data); + return e1e_wphy(hw, MII_CTRL1000, phy_data); } /** @@ -792,7 +746,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) if (ret_val) return ret_val; /* Commit the changes. */ - ret_val = e1000e_commit_phy(hw); + ret_val = phy->ops.commit(hw); if (ret_val) { e_dbg("Error committing the PHY changes\n"); return ret_val; @@ -848,10 +802,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) } /* Commit the changes. */ - ret_val = e1000e_commit_phy(hw); - if (ret_val) { - e_dbg("Error committing the PHY changes\n"); - return ret_val; + if (phy->ops.commit) { + ret_val = phy->ops.commit(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } } if (phy->type == e1000_phy_82578) { @@ -895,10 +851,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) msleep(100); /* disable lplu d0 during driver init */ - ret_val = e1000_set_d0_lplu_state(hw, false); - if (ret_val) { - e_dbg("Error Disabling LPLU D0\n"); - return ret_val; + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D0\n"); + return ret_val; + } } /* Configure mdi-mdix settings */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); @@ -943,12 +901,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) return ret_val; /* Set auto Master/Slave resolution process */ - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + ret_val = e1e_rphy(hw, MII_CTRL1000, &data); if (ret_val) return ret_val; - data &= ~CR_1000T_MS_ENABLE; - ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + data &= ~CTL1000_ENABLE_MASTER; + ret_val = e1e_wphy(hw, MII_CTRL1000, data); if (ret_val) return ret_val; } @@ -978,13 +936,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) phy->autoneg_advertised &= phy->autoneg_mask; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg); if (ret_val) return ret_val; if (phy->autoneg_mask & ADVERTISE_1000_FULL) { /* Read the MII 1000Base-T Control Register (Address 9). */ - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg); if (ret_val) return ret_val; } @@ -1000,36 +958,35 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). 
*/ - mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | - NWAY_AR_100TX_HD_CAPS | - NWAY_AR_10T_FD_CAPS | - NWAY_AR_10T_HD_CAPS); - mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF | + ADVERTISE_10FULL | ADVERTISE_10HALF); + mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); /* Do we want to advertise 10 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_HALF) { e_dbg("Advertise 10mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_10HALF; } /* Do we want to advertise 10 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_FULL) { e_dbg("Advertise 10mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_10FULL; } /* Do we want to advertise 100 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_HALF) { e_dbg("Advertise 100mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_100HALF; } /* Do we want to advertise 100 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_FULL) { e_dbg("Advertise 100mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_100FULL; } /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ @@ -1039,14 +996,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) /* Do we want to advertise 1000 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { e_dbg("Advertise 1000mb Full duplex\n"); - mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; } /* Check for a software override of the flow control settings, and * setup the PHY advertisement registers accordingly. If * auto-negotiation is enabled, then software will have to set the * "PAUSE" bits to the correct value in the Auto-Negotiation - * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * Advertisement Register (MII_ADVERTISE) and re-start auto- * negotiation. * * The possible values of the "fc" parameter are: @@ -1064,7 +1021,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) /* Flow control (Rx & Tx) is completely disabled by a * software over-ride. */ - mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + mii_autoneg_adv_reg &= + ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); break; case e1000_fc_rx_pause: /* Rx Flow control is enabled, and Tx Flow control is @@ -1076,34 +1034,36 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) * (in e1000e_config_fc_after_link_up) we will disable the * hw's ability to send PAUSE frames. */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); break; case e1000_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is * disabled, by a software over-ride. */ - mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; - mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; + mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; break; case e1000_fc_full: /* Flow control (both Rx and Tx) is enabled by a software * over-ride. 
*/ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); break; default: e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; } - ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg); if (ret_val) return ret_val; e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); if (phy->autoneg_mask & ADVERTISE_1000_FULL) - ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); + ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg); return ret_val; } @@ -1145,12 +1105,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) /* Restart auto-negotiation by setting the Auto Neg Enable bit and * the Auto Neg Restart bit in the PHY control register. */ - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); if (ret_val) return ret_val; - phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART); + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); if (ret_val) return ret_val; @@ -1196,7 +1156,7 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) * depending on user settings. */ e_dbg("Forcing Speed and Duplex\n"); - ret_val = e1000_phy_force_speed_duplex(hw); + ret_val = hw->phy.ops.force_speed_duplex(hw); if (ret_val) { e_dbg("Error Forcing Speed and Duplex\n"); return ret_val; @@ -1237,13 +1197,13 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) u16 phy_data; bool link; - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; @@ -1315,20 +1275,22 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) e_dbg("M88E1000 PSCR: %X\n", phy_data); - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; /* Reset the phy to commit changes. 
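Editorial aside: the flow-control switch above now builds the advertisement from the generic <linux/mii.h> pause bits. The mapping it implements, condensed into a hypothetical helper (assuming the driver's e1000_fc_* enumeration, which the case labels above come from):

        /* Map an e1000 flow-control mode to MII advertisement pause bits.
         * Mirrors the switch in e1000_phy_setup_autoneg(): Rx-only pause still
         * advertises both bits because symmetric pause also satisfies "Rx pause".
         */
        static u16 fc_to_pause_bits_sketch(enum e1000_fc_mode fc)
        {
                switch (fc) {
                case e1000_fc_rx_pause:
                case e1000_fc_full:
                        return ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
                case e1000_fc_tx_pause:
                        return ADVERTISE_PAUSE_ASYM;
                case e1000_fc_none:
                default:
                        return 0;
                }
        }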
*/ - ret_val = e1000e_commit_phy(hw); - if (ret_val) - return ret_val; + if (hw->phy.ops.commit) { + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + } if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); @@ -1406,13 +1368,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) u16 data; bool link; - ret_val = e1e_rphy(hw, PHY_CONTROL, &data); + ret_val = e1e_rphy(hw, MII_BMCR, &data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &data); - ret_val = e1e_wphy(hw, PHY_CONTROL, data); + ret_val = e1e_wphy(hw, MII_BMCR, data); if (ret_val) return ret_val; @@ -1456,13 +1418,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) /** * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex * @hw: pointer to the HW structure - * @phy_ctrl: pointer to current value of PHY_CONTROL + * @phy_ctrl: pointer to current value of MII_BMCR * * Forces speed and duplex on the PHY by doing the following: disable flow * control, force speed/duplex on the MAC, disable auto speed detection, * disable auto-negotiation, configure duplex, configure speed, configure * the collision distance, write configuration to CTRL register. The - * caller must write to the PHY_CONTROL register for these settings to + * caller must write to the MII_BMCR register for these settings to * take affect. **/ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) @@ -1482,29 +1444,28 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) ctrl &= ~E1000_CTRL_ASDE; /* Disable autoneg on the phy */ - *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + *phy_ctrl &= ~BMCR_ANENABLE; /* Forcing Full or Half Duplex? */ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { ctrl &= ~E1000_CTRL_FD; - *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + *phy_ctrl &= ~BMCR_FULLDPLX; e_dbg("Half Duplex\n"); } else { ctrl |= E1000_CTRL_FD; - *phy_ctrl |= MII_CR_FULL_DUPLEX; + *phy_ctrl |= BMCR_FULLDPLX; e_dbg("Full Duplex\n"); } /* Forcing 10mb or 100mb? */ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { ctrl |= E1000_CTRL_SPD_100; - *phy_ctrl |= MII_CR_SPEED_100; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + *phy_ctrl |= BMCR_SPEED100; + *phy_ctrl &= ~BMCR_SPEED1000; e_dbg("Forcing 100mb\n"); } else { ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); - *phy_ctrl |= MII_CR_SPEED_10; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100); e_dbg("Forcing 10mb\n"); } @@ -1745,13 +1706,13 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { - ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & MII_SR_AUTONEG_COMPLETE) + if (phy_status & BMSR_ANEGCOMPLETE) break; msleep(100); } @@ -1778,21 +1739,21 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, u16 i, phy_status; for (i = 0; i < iterations; i++) { - /* Some PHYs require the PHY_STATUS register to be read + /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. No harm doing * it across the board. 
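Editorial aside: the comment above in e1000e_phy_has_link_generic() explains the double BMSR read — the link bit latches low, so the first read clears stale latched state and the second returns the live status. A minimal sketch of the idiom (error handling elided; a real caller checks the e1e_rphy() return values as the function above does):

        /* Read MII_BMSR twice: first read flushes a latched link-down event,
         * second read reflects the current link state.
         */
        static bool phy_link_up_sketch(struct e1000_hw *hw)
        {
                u16 bmsr = 0;

                e1e_rphy(hw, MII_BMSR, &bmsr);  /* clears latched link-down */
                e1e_rphy(hw, MII_BMSR, &bmsr);  /* current status */

                return !!(bmsr & BMSR_LSTATUS);
        }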
*/ - ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) /* If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ udelay(usec_interval); - ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & MII_SR_LINK_STATUS) + if (phy_status & BMSR_LSTATUS) break; if (usec_interval >= 1000) mdelay(usec_interval/1000); @@ -1962,21 +1923,19 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { - ret_val = e1000_get_cable_length(hw); + ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) return ret_val; - ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data); if (ret_val) return ret_val; - phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + phy->local_rx = (phy_data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + phy->remote_rx = (phy_data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { /* Set values to "undefined" */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; @@ -2026,21 +1985,19 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - ret_val = e1000_get_cable_length(hw); + ret_val = phy->ops.get_cable_length(hw); if (ret_val) return ret_val; - ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); + ret_val = e1e_rphy(hw, MII_STAT1000, &data); if (ret_val) return ret_val; - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + phy->local_rx = (data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + phy->remote_rx = (data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; @@ -2114,12 +2071,12 @@ s32 e1000e_phy_sw_reset(struct e1000_hw *hw) s32 ret_val; u16 phy_ctrl; - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); if (ret_val) return ret_val; - phy_ctrl |= MII_CR_RESET; - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + phy_ctrl |= BMCR_RESET; + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); if (ret_val) return ret_val; @@ -2166,17 +2123,17 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) phy->ops.release(hw); - return e1000_get_phy_cfg_done(hw); + return phy->ops.get_cfg_done(hw); } /** - * e1000e_get_cfg_done - Generic configuration done + * e1000e_get_cfg_done_generic - Generic configuration done * @hw: pointer to the HW structure * * Generic function to wait 10 milli-seconds for configuration to complete * and return success. 
**/ -s32 e1000e_get_cfg_done(struct e1000_hw *hw) +s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw) { mdelay(10); @@ -2266,38 +2223,6 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) return 0; } -/* Internal function pointers */ - -/** - * e1000_get_phy_cfg_done - Generic PHY configuration done - * @hw: pointer to the HW structure - * - * Return success if silicon family did not implement a family specific - * get_cfg_done function. - **/ -static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) -{ - if (hw->phy.ops.get_cfg_done) - return hw->phy.ops.get_cfg_done(hw); - - return 0; -} - -/** - * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex - * @hw: pointer to the HW structure - * - * When the silicon family has not implemented a forced speed/duplex - * function for the PHY, simply return 0. - **/ -static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) -{ - if (hw->phy.ops.force_speed_duplex) - return hw->phy.ops.force_speed_duplex(hw); - - return 0; -} - /** * e1000e_get_phy_type_from_id - Get PHY type from id * @phy_id: phy_id read from the phy @@ -2549,7 +2474,6 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { - /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); @@ -2672,7 +2596,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) **/ s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) { - s32 ret_val = 0; + s32 ret_val; /* Select Port Control Registers page */ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); @@ -2781,9 +2705,9 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw) u16 mii_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ - e1e_rphy(hw, PHY_CONTROL, &mii_reg); - mii_reg &= ~MII_CR_POWER_DOWN; - e1e_wphy(hw, PHY_CONTROL, mii_reg); + e1e_rphy(hw, MII_BMCR, &mii_reg); + mii_reg &= ~BMCR_PDOWN; + e1e_wphy(hw, MII_BMCR, mii_reg); } /** @@ -2799,50 +2723,13 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw) u16 mii_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ - e1e_rphy(hw, PHY_CONTROL, &mii_reg); - mii_reg |= MII_CR_POWER_DOWN; - e1e_wphy(hw, PHY_CONTROL, mii_reg); + e1e_rphy(hw, MII_BMCR, &mii_reg); + mii_reg |= BMCR_PDOWN; + e1e_wphy(hw, MII_BMCR, mii_reg); usleep_range(1000, 2000); } /** - * e1000e_commit_phy - Soft PHY reset - * @hw: pointer to the HW structure - * - * Performs a soft PHY reset on those that apply. This is a function pointer - * entry point called by drivers. - **/ -s32 e1000e_commit_phy(struct e1000_hw *hw) -{ - if (hw->phy.ops.commit) - return hw->phy.ops.commit(hw); - - return 0; -} - -/** - * e1000_set_d0_lplu_state - Sets low power link up state for D0 - * @hw: pointer to the HW structure - * @active: boolean used to enable/disable lplu - * - * Success returns 0, Failure returns 1 - * - * The low power link up (lplu) state is set to the power management level D0 - * and SmartSpeed is disabled when active is true, else clear lplu for D0 - * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU - * is used during Dx states where the power conservation is most important. - * During driver activity, SmartSpeed should be enabled so performance is - * maintained. This is a function pointer entry point called by drivers. 
- **/ -static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) -{ - if (hw->phy.ops.set_d0_lplu_state) - return hw->phy.ops.set_d0_lplu_state(hw, active); - - return 0; -} - -/** * __e1000_read_phy_reg_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read @@ -3104,8 +2991,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read) { s32 ret_val; - u32 addr_reg = 0; - u32 data_reg = 0; + u32 addr_reg; + u32 data_reg; /* This takes care of the difference with desktop vs mobile phy */ addr_reg = (hw->phy.type == e1000_phy_82578) ? @@ -3154,8 +3041,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) return 0; /* Do not apply workaround if in PHY loopback bit 14 set */ - e1e_rphy(hw, PHY_CONTROL, &data); - if (data & PHY_CONTROL_LB) + e1e_rphy(hw, MII_BMCR, &data); + if (data & BMCR_LOOPBACK) return 0; /* check if link is up and at 1Gbps */ @@ -3173,8 +3060,9 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) msleep(200); /* flush the packets in the fifo buffer */ - ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | - HV_MUX_DATA_CTRL_FORCE_SPEED); + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, + (HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED)); if (ret_val) return ret_val; @@ -3218,13 +3106,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) u16 phy_data; bool link; - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; @@ -3292,17 +3180,15 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) if (ret_val) return ret_val; - ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); + ret_val = e1e_rphy(hw, MII_STAT1000, &data); if (ret_val) return ret_val; - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + phy->local_rx = (data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; + phy->remote_rx = (data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; @@ -3333,7 +3219,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) I82577_DSTATUS_CABLE_LENGTH_SHIFT; if (length == E1000_CABLE_LENGTH_UNDEFINED) - ret_val = -E1000_ERR_PHY; + return -E1000_ERR_PHY; phy->cable_length = length; diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h new file mode 100644 index 000000000000..f4f71b9991e3 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -0,0 +1,242 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_PHY_H_ +#define _E1000E_PHY_H_ + +s32 e1000e_check_downshift(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000e_get_phy_id(struct e1000_hw *hw); +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000e_setup_copper_link(struct e1000_hw *hw); +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +s32 e1000e_determine_phy_address(struct e1000_hw *hw); +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +void 
e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define 
BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */ +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c new file mode 100644 index 000000000000..b477fa53ec94 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -0,0 +1,277 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
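Editorial aside: the PHC callbacks in this new file sit on top of the cyclecounter configured in e1000_sw_init() and a timecounter that accumulates SYSTIM cycles into nanoseconds. A hypothetical sketch of seeding that layering, using the same adapter fields and locking as the settime/adjtime callbacks below (where the real driver performs this seeding is outside this excerpt):

        /* Seed the timecounter from wall time so later timecounter_read()
         * calls return nanoseconds since this point, driven by SYSTIM cycles.
         */
        static void ptp_timecounter_init_sketch(struct e1000_adapter *adapter)
        {
                unsigned long flags;

                spin_lock_irqsave(&adapter->systim_lock, flags);
                timecounter_init(&adapter->tc, &adapter->cc,
                                 ktime_to_ns(ktime_get_real()));
                spin_unlock_irqrestore(&adapter->systim_lock, flags);
        }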
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* PTP 1588 Hardware Clock (PHC) + * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) + * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> + */ + +#include "e1000.h" + +/** + * e1000e_phc_adjfreq - adjust the frequency of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated delta from + * the base frequency. + **/ +static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + struct e1000_hw *hw = &adapter->hw; + bool neg_adj = false; + u64 adjustment; + u32 timinca, incvalue; + s32 ret_val; + + if ((delta > ptp->max_adj) || (delta <= -1000000000)) + return -EINVAL; + + if (delta < 0) { + neg_adj = true; + delta = -delta; + } + + /* Get the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, &timinca); + if (ret_val) + return ret_val; + + incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; + + adjustment = incvalue; + adjustment *= delta; + adjustment = div_u64(adjustment, 1000000000); + + incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment); + + timinca &= ~E1000_TIMINCA_INCVALUE_MASK; + timinca |= incvalue; + + ew32(TIMINCA, timinca); + + return 0; +} + +/** + * e1000e_phc_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. + **/ +static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + s64 now; + + spin_lock_irqsave(&adapter->systim_lock, flags); + now = timecounter_read(&adapter->tc); + now += delta; + timecounter_init(&adapter->tc, &adapter->cc, now); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. 
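Editorial aside: e1000e_phc_adjfreq() above scales the TIMINCA increment by a signed parts-per-billion delta. The same arithmetic as a standalone worked example (the increment value 24 is illustrative only, not taken from any particular MAC):

        /* Scale a SYSTIM increment value by ppb parts per billion.
         * E.g. incvalue = 24 and ppb = +100 gives 24 * 100 / 1e9 = 0, i.e. the
         * adjustment is below one increment LSB and truncates to zero, while
         * ppb = +50000000 (5%) gives 24 + 1 = 25.
         */
        #include <stdint.h>

        static uint32_t scale_incvalue(uint32_t incvalue, int32_t ppb)
        {
                uint64_t adj;
                int neg = ppb < 0;

                if (neg)
                        ppb = -ppb;

                adj = (uint64_t)incvalue * (uint64_t)ppb / 1000000000ULL;

                return neg ? incvalue - (uint32_t)adj : incvalue + (uint32_t)adj;
        }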
+ **/ +static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u32 remainder; + u64 ns; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +/** + * e1000e_phc_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset the timecounter to use a new base value instead of the kernel + * wall timer value. + **/ +static int e1000e_phc_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u64 ns; + + ns = ts->tv_sec * NSEC_PER_SEC; + ns += ts->tv_nsec; + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, ns); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + **/ +static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -EOPNOTSUPP; +} + +static void e1000e_systim_overflow_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + systim_overflow_work.work); + struct e1000_hw *hw = &adapter->hw; + struct timespec ts; + + adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts); + + e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); +} + +static const struct ptp_clock_info e1000e_ptp_clock_info = { + .owner = THIS_MODULE, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjfreq = e1000e_phc_adjfreq, + .adjtime = e1000e_phc_adjtime, + .gettime = e1000e_phc_gettime, + .settime = e1000e_phc_settime, + .enable = e1000e_phc_enable, +}; + +/** + * e1000e_ptp_init - initialize PTP for devices which support it + * @adapter: board private structure + * + * This function performs the required steps for enabling PTP support. + * If PTP support has already been loaded it simply calls the cyclecounter + * init routine and exits. 
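Editorial aside: once e1000e_ptp_init() below registers the clock, it appears as a /dev/ptpN character device that user space can read as a dynamic POSIX clock. A minimal sketch (the device index and the FD_TO_CLOCKID convention follow the kernel's testptp example; both are assumptions outside this diff):

        /* Read the PHC registered by e1000e_ptp_init() via its /dev/ptpN node. */
        #include <stdio.h>
        #include <fcntl.h>
        #include <time.h>
        #include <unistd.h>

        #define CLOCKFD                 3
        #define FD_TO_CLOCKID(fd)       ((~(clockid_t)(fd) << 3) | CLOCKFD)

        int main(void)
        {
                struct timespec ts;
                int fd = open("/dev/ptp0", O_RDWR);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                if (clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
                        perror("clock_gettime");
                        return 1;
                }

                printf("PHC time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
                close(fd);
                return 0;
        }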
+ **/ +void e1000e_ptp_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->ptp_clock = NULL; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + adapter->ptp_clock_info = e1000e_ptp_clock_info; + + snprintf(adapter->ptp_clock_info.name, + sizeof(adapter->ptp_clock_info.name), "%pm", + adapter->netdev->perm_addr); + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + if ((hw->mac.type != e1000_pch_lpt) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + adapter->ptp_clock_info.max_adj = 24000000 - 1; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + adapter->ptp_clock_info.max_adj = 600000000 - 1; + break; + default: + break; + } + + INIT_DELAYED_WORK(&adapter->systim_overflow_work, + e1000e_systim_overflow_work); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + e_err("ptp_clock_register failed\n"); + } else { + e_info("registered PHC clock\n"); + } +} + +/** + * e1000e_ptp_remove - disable PTP device and stop the overflow check + * @adapter: board private structure + * + * Stop the PTP support, and cancel the delayed work. + **/ +void e1000e_ptp_remove(struct e1000_adapter *adapter) +{ + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + cancel_delayed_work_sync(&adapter->systim_overflow_work); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_info("removed PHC\n"); + } +} diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h new file mode 100644 index 000000000000..794fe1497666 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -0,0 +1,252 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_REGS_H_ +#define _E1000E_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_SVCR 0x000F0 +#define E1000_SVT 0x000F4 +#define E1000_LPIC 0x000FC /* Low Power IDLE control */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDTR 0x02820 /* 
Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 
0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ + +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_RXCSUM 
0x05000 /* Rx Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ + +#endif diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index 624476cfa727..f19700e285bb 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 82575 PCI-Express Ethernet Linux driver -# Copyright(c) 1999 - 2012 Intel Corporation. +# Copyright(c) 1999 - 2013 Intel Corporation. 
# # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -34,4 +34,4 @@ obj-$(CONFIG_IGB) += igb.o igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ - e1000_i210.o igb_ptp.o + e1000_i210.o igb_ptp.o igb_hwmon.o diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index fdaaf2709d0a..84e7e0909def 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -33,6 +33,7 @@ #include <linux/types.h> #include <linux/if_ether.h> +#include <linux/i2c.h> #include "e1000_mac.h" #include "e1000_82575.h" @@ -110,184 +111,168 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) return ext_mdio; } -static s32 igb_get_invariants_82575(struct e1000_hw *hw) +/** + * igb_init_phy_params_82575 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_phy_params_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - struct e1000_nvm_info *nvm = &hw->nvm; - struct e1000_mac_info *mac = &hw->mac; - struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; - u32 eecd; - s32 ret_val; - u16 size; - u32 ctrl_ext = 0; + s32 ret_val = 0; + u32 ctrl_ext; - switch (hw->device_id) { - case E1000_DEV_ID_82575EB_COPPER: - case E1000_DEV_ID_82575EB_FIBER_SERDES: - case E1000_DEV_ID_82575GB_QUAD_COPPER: - mac->type = e1000_82575; - break; - case E1000_DEV_ID_82576: - case E1000_DEV_ID_82576_NS: - case E1000_DEV_ID_82576_NS_SERDES: - case E1000_DEV_ID_82576_FIBER: - case E1000_DEV_ID_82576_SERDES: - case E1000_DEV_ID_82576_QUAD_COPPER: - case E1000_DEV_ID_82576_QUAD_COPPER_ET2: - case E1000_DEV_ID_82576_SERDES_QUAD: - mac->type = e1000_82576; - break; - case E1000_DEV_ID_82580_COPPER: - case E1000_DEV_ID_82580_FIBER: - case E1000_DEV_ID_82580_QUAD_FIBER: - case E1000_DEV_ID_82580_SERDES: - case E1000_DEV_ID_82580_SGMII: - case E1000_DEV_ID_82580_COPPER_DUAL: - case E1000_DEV_ID_DH89XXCC_SGMII: - case E1000_DEV_ID_DH89XXCC_SERDES: - case E1000_DEV_ID_DH89XXCC_BACKPLANE: - case E1000_DEV_ID_DH89XXCC_SFP: - mac->type = e1000_82580; - break; - case E1000_DEV_ID_I350_COPPER: - case E1000_DEV_ID_I350_FIBER: - case E1000_DEV_ID_I350_SERDES: - case E1000_DEV_ID_I350_SGMII: - mac->type = e1000_i350; - break; - case E1000_DEV_ID_I210_COPPER: - case E1000_DEV_ID_I210_COPPER_OEM1: - case E1000_DEV_ID_I210_COPPER_IT: - case E1000_DEV_ID_I210_FIBER: - case E1000_DEV_ID_I210_SERDES: - case E1000_DEV_ID_I210_SGMII: - mac->type = e1000_i210; - break; - case E1000_DEV_ID_I211_COPPER: - mac->type = e1000_i211; - break; - default: - return -E1000_ERR_MAC_INIT; - break; + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; } - /* Set media type */ - /* - * The 82575 uses bits 22:23 for link mode. The mode can be changed - * based on the EEPROM. We cannot rely upon device ID. There - * is no distinguishable difference between fiber and internal - * SerDes mode on the 82575. There can be an external PHY attached - * on the SGMII interface. 
For this, we'll set sgmii_active to true. - */ - phy->media_type = e1000_media_type_copper; - dev_spec->sgmii_active = false; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; ctrl_ext = rd32(E1000_CTRL_EXT); - switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { - case E1000_CTRL_EXT_LINK_MODE_SGMII: - dev_spec->sgmii_active = true; - break; - case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: - hw->phy.media_type = e1000_media_type_internal_serdes; - break; - default: - break; + + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; } - /* Set mta register count */ - mac->mta_reg_count = 128; - /* Set rar entry count */ - switch (mac->type) { - case e1000_82576: - mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = igb_read_phy_reg_gs40g; + phy->ops.write_reg = igb_write_phy_reg_gs40g; + break; + default: + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. */ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID) + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; - case e1000_82580: - mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; break; - case e1000_i350: - mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = + igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; break; - default: - mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + 
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; + default: + ret_val = -E1000_ERR_PHY; + goto out; } - /* reset */ - if (mac->type >= e1000_82580) - mac->ops.reset_hw = igb_reset_hw_82580; - else - mac->ops.reset_hw = igb_reset_hw_82575; - if (mac->type >= e1000_i210) { - mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; - mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; - } else { - mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; - mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; - } +out: + return ret_val; +} - /* Set if part includes ASF firmware */ - mac->asf_firmware_present = true; - /* Set if manageability features are enabled. */ - mac->arc_subsystem_valid = - (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; - /* enable EEE on i350 parts and later parts */ - if (mac->type >= e1000_i350) - dev_spec->eee_disable = false; - else - dev_spec->eee_disable = true; - /* physical interface link setup */ - mac->ops.setup_physical_interface = - (hw->phy.media_type == e1000_media_type_copper) - ? igb_setup_copper_link_82575 - : igb_setup_serdes_link_82575; +/** + * igb_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u16 size; - /* NVM initialization */ - eecd = rd32(E1000_EECD); size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); - - /* - * Added to a constant, "size" becomes the left-shift value + /* Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; - /* - * Check for invalid size + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); + if (size > 15) size = 15; - } nvm->word_size = 1 << size; if (hw->mac.type < e1000_i210) { - nvm->opcode_bits = 8; - nvm->delay_usec = 1; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { case e1000_nvm_override_spi_large: - nvm->page_size = 32; + nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: - nvm->page_size = 8; + nvm->page_size = 8; nvm->address_bits = 8; break; default: - nvm->page_size = eecd - & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd - & E1000_EECD_ADDR_BITS ? 16 : 8; + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? + 16 : 8; break; } if (nvm->word_size == (1 << 15)) nvm->page_size = 128; nvm->type = e1000_nvm_eeprom_spi; - } else + } else { nvm->type = e1000_nvm_flash_hw; + } /* NVM Function Pointers */ switch (hw->mac.type) { @@ -344,118 +329,176 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; } - /* if part supports SR-IOV then initialize mailbox parameters */ + return 0; +} + +/** + * igb_init_mac_params_82575 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 igb_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ switch (mac->type) { case e1000_82576: + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + break; + case e1000_82580: + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + break; case e1000_i350: - igb_init_mbx_params_pf(hw); + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; break; default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; break; } + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; - /* setup PHY parameters */ - if (phy->media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; - return 0; - } - - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; - - ctrl_ext = rd32(E1000_CTRL_EXT); + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; - /* PHY function pointers */ - if (igb_sgmii_active_82575(hw)) { - phy->ops.reset = igb_phy_hw_reset_sgmii_82575; - ctrl_ext |= E1000_CTRL_I2C_ENA; } else { - phy->ops.reset = igb_phy_hw_reset; - ctrl_ext &= ~E1000_CTRL_I2C_ENA; + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; } - wr32(E1000_CTRL_EXT, ctrl_ext); - igb_reset_mdicnfg_82580(hw); - - if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; - } else if ((hw->mac.type == e1000_82580) - || (hw->mac.type == e1000_i350)) { - phy->ops.read_reg = igb_read_phy_reg_82580; - phy->ops.write_reg = igb_write_phy_reg_82580; - } else if (hw->phy.type >= e1000_phy_i210) { - phy->ops.read_reg = igb_read_phy_reg_gs40g; - phy->ops.write_reg = igb_write_phy_reg_gs40g; - } else { - phy->ops.read_reg = igb_read_phy_reg_igp; - phy->ops.write_reg = igb_write_phy_reg_igp; - } + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; - /* set lan id */ - hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> - E1000_STATUS_FUNC_SHIFT; + return 0; +} - /* Set phy->phy_addr and phy->id. 
*/ - ret_val = igb_get_phy_id_82575(hw); - if (ret_val) - return ret_val; +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; + s32 ret_val; + u32 ctrl_ext = 0; - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case I347AT4_E_PHY_ID: - case M88E1112_E_PHY_ID: - case M88E1111_I_PHY_ID: - phy->type = e1000_phy_m88; - phy->ops.get_phy_info = igb_get_phy_info_m88; + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_COPPER_OEM1: + case E1000_DEV_ID_I210_COPPER_IT: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + default: + return -E1000_ERR_MAC_INIT; + break; + } - if (phy->id == I347AT4_E_PHY_ID || - phy->id == M88E1112_E_PHY_ID) - phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; - else - phy->ops.get_cable_length = igb_get_cable_length_m88; + /* Set media type */ + /* + * The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. 
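 *
 * Orientation aside (a sketch only; the hex encodings are quoted from the
 * driver's existing E1000_CTRL_EXT_LINK_MODE_* definitions): the link-mode
 * field in bits 23:22 is decoded by the switch just below as
 *
 *     00 (0x00000000)  GMII/MII, internal copper PHY    (default case)
 *     01 (0x00400000)  1000BASE-KX                      -> internal SerDes
 *     10 (0x00800000)  SGMII, external PHY              -> sgmii_active = true
 *     11 (0x00C00000)  PCIe SerDes                      -> internal SerDes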
+ */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; - if (phy->id == I210_I_PHY_ID) { - phy->ops.get_cable_length = - igb_get_cable_length_m88_gen2; - phy->ops.set_d0_lplu_state = - igb_set_d0_lplu_state_82580; - phy->ops.set_d3_lplu_state = - igb_set_d3_lplu_state_82580; - } - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + ctrl_ext = rd32(E1000_CTRL_EXT); + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + dev_spec->sgmii_active = true; break; - case IGP03E1000_E_PHY_ID: - phy->type = e1000_phy_igp_3; - phy->ops.get_phy_info = igb_get_phy_info_igp; - phy->ops.get_cable_length = igb_get_cable_length_igp_2; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; break; - case I82580_I_PHY_ID: - case I350_I_PHY_ID: - phy->type = e1000_phy_82580; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; - phy->ops.get_cable_length = igb_get_cable_length_82580; - phy->ops.get_phy_info = igb_get_phy_info_82580; - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + default: break; - case I210_I_PHY_ID: - phy->type = e1000_phy_i210; - phy->ops.get_phy_info = igb_get_phy_info_m88; - phy->ops.check_polarity = igb_check_polarity_m88; - phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + } + + /* mac initialization and operations */ + ret_val = igb_init_mac_params_82575(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igb_init_nvm_params_82575(hw); + if (ret_val) + goto out; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); break; default: - return -E1000_ERR_PHY; + break; } - return 0; + /* setup PHY parameters */ + ret_val = igb_init_phy_params_82575(hw); + +out: + return ret_val; } /** @@ -2302,18 +2345,157 @@ out: return ret_val; } +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + */ +s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return status; + + 
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return status; +} + +/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + */ +s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (rd32(E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (rd32(E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return status; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return status; +} + static struct e1000_mac_operations e1000_mac_ops_82575 = { .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, .rar_set = igb_rar_set, .read_mac_addr = igb_read_mac_addr_82575, .get_speed_and_duplex = igb_get_speed_and_duplex_copper, +#ifdef CONFIG_IGB_HWMON + .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +#endif }; static struct e1000_phy_operations e1000_phy_ops_82575 = { .acquire = igb_acquire_phy_82575, .get_cfg_done = igb_get_cfg_done_82575, .release = igb_release_phy_82575, + .write_i2c_byte = igb_write_i2c_byte, + .read_i2c_byte = 
igb_read_i2c_byte, }; static struct e1000_nvm_operations e1000_nvm_ops_82575 = { diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 44b76b3b6816..73ab41f0e032 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -32,6 +32,10 @@ extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw); extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw); extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_DEF1_DEF2 << 8) | \ @@ -260,5 +264,16 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); u16 igb_rxpbs_adjust_82580(u32 data); s32 igb_set_eee_i350(struct e1000_hw *); - +s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *); +s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw); + +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 45dce06eff26..7e13337d3b9d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -470,6 +470,7 @@ #define E1000_ERR_NO_SPACE 17 #define E1000_ERR_NVM_PBA_SECTION 18 #define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +#define E1000_ERR_I2C 20 /* Loop limit on how long we wait for auto-negotiation to complete */ #define COPPER_LINK_UP_LIMIT 10 @@ -674,6 +675,18 @@ #define NVM_COMB_VER_SHFT 8 #define NVM_VER_INVALID 0xFFFF #define NVM_ETRACK_SHIFT 16 +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF #define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ #define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index c2a51dcda550..0d5cf9c63d0d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -325,6 +325,10 @@ struct e1000_mac_operations { s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif }; @@ -342,6 +346,8 @@ struct e1000_phy_operations { s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); }; struct e1000_nvm_operations { @@ -354,6 +360,19 @@ struct e1000_nvm_operations { s32 (*valid_led_default)(struct e1000_hw *, u16 *); }; +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + struct e1000_info { s32 (*get_invariants)(struct e1000_hw *); struct e1000_mac_operations *mac_ops; @@ -399,6 +418,7 @@ struct e1000_mac_info { bool report_tx_early; bool serdes_has_link; bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; }; struct e1000_phy_info { diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index fbcdbebb0b5f..6a42344f24f1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 1c89358a99ab..e4e1a73b7c75 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 101e6e4da97f..a5c7200b9a71 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index e2b2c4b9c951..e6d6ce433261 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 5988b8958baf..38e0df350904 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index dbcfa3d5caec..c13b56d9edb2 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index fbb7604db364..5b62adbe134d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index 7012d458c6f7..6bfc0c43aace 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2012 Intel Corporation. + Copyright(c) 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index fe76004aca4e..2918c979b5bb 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index ed282f877d9a..784fd1c40989 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index e5db48594e8a..15343286082e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -75,6 +75,14 @@ #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ /* IEEE 1588 TIMESYNCH */ #define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ @@ -124,6 +132,14 @@ /* Split and Replication RX Control - RW */ #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ + +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + /* * Convenience macros * diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 17f1686ee411..d27edbc63923 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -39,6 +39,8 @@ #include <linux/ptp_clock_kernel.h> #include <linux/bitops.h> #include <linux/if_vlan.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> struct igb_adapter; @@ -137,8 +139,6 @@ struct vf_data_storage { #define IGB_RX_HDR_LEN IGB_RXBUFFER_256 #define IGB_RX_BUFSZ IGB_RXBUFFER_2048 -/* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define IGB_TX_QUEUE_WAKE 16 /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ @@ -167,6 +167,17 @@ enum igb_tx_flags { #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 #define IGB_TX_FLAGS_VLAN_SHIFT 16 +/* + * The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
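 *
 * Worked illustration (hypothetical numbers, using the macros defined just
 * below): with IGB_MAX_DATA_PER_TXD = 32768, a 49152-byte fragment costs
 *
 *     TXD_USE_COUNT(49152) = DIV_ROUND_UP(49152, 32768) = 2
 *
 * descriptors, while DESC_NEEDED (MAX_SKB_FRAGS + 4) is the worst-case
 * descriptor count reserved per frame.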
+ */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct igb_tx_buffer { @@ -219,6 +230,7 @@ struct igb_ring { struct igb_tx_buffer *tx_buffer_info; struct igb_rx_buffer *rx_buffer_info; }; + unsigned long last_rx_timestamp; void *desc; /* descriptor ring memory */ unsigned long flags; /* ring specific flags */ void __iomem *tail; /* pointer to ring tail register */ @@ -272,10 +284,18 @@ struct igb_q_vector { enum e1000_ring_flags_t { IGB_RING_FLAG_RX_SCTP_CSUM, IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, IGB_RING_FLAG_TX_CTX_IDX, IGB_RING_FLAG_TX_DETECT_HANG }; +#define ring_uses_build_skb(ring) \ + test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define set_ring_build_skb_enabled(ring) \ + set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) #define IGB_RX_DESC(R, i) \ @@ -301,6 +321,32 @@ static inline int igb_desc_unused(struct igb_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } +struct igb_i2c_client_list { + struct i2c_client *client; + struct igb_i2c_client_list *next; +}; + +#ifdef CONFIG_IGB_HWMON + +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; + }; +#endif + /* board specific private data structure */ struct igb_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -386,11 +432,22 @@ struct igb_adapter { struct delayed_work ptp_overflow_work; struct work_struct ptp_tx_work; struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; char fw_version[32]; +#ifdef CONFIG_IGB_HWMON + struct hwmon_buff igb_hwmon_buff; + bool ets; +#endif + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct igb_i2c_client_list *i2c_clients; }; #define IGB_FLAG_HAS_MSI (1 << 0) @@ -449,6 +506,7 @@ extern void igb_ptp_init(struct igb_adapter *adapter); extern void igb_ptp_stop(struct igb_adapter *adapter); extern void igb_ptp_reset(struct igb_adapter *adapter); extern void igb_ptp_tx_work(struct work_struct *work); +extern void igb_ptp_rx_hang(struct igb_adapter *adapter); extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); @@ -466,7 +524,10 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); - +#ifdef CONFIG_IGB_HWMON +extern void igb_sysfs_exit(struct igb_adapter *adapter); +extern int igb_sysfs_init(struct igb_adapter *adapter); +#endif static inline s32 igb_reset_phy(struct e1000_hw *hw) { if (hw->phy.ops.reset) diff --git 
a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index bfe9208c4b18..a3830a8ba4c1 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -92,6 +92,8 @@ static const struct igb_stats igb_gstrings_stats[] = { IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), }; #define IGB_NETDEV_STAT(_net_stat) { \ @@ -1889,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data) } else { hw->mac.ops.check_for_link(&adapter->hw); if (hw->mac.autoneg) - msleep(4000); + msleep(5000); if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) *data = 1; @@ -2272,12 +2274,21 @@ static int igb_get_ts_info(struct net_device *dev, struct igb_adapter *adapter = netdev_priv(dev); switch (adapter->hw.mac.type) { + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; case e1000_82576: case e1000_82580: case e1000_i350: case e1000_i210: case e1000_i211: info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c new file mode 100644 index 000000000000..0a9b073d0b03 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -0,0 +1,242 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "igb.h" +#include "e1000_82575.h" +#include "e1000_hw.h" + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/sysfs.h> +#include <linux/kobject.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/hwmon.h> +#include <linux/pci.h> + +#ifdef CONFIG_IGB_HWMON +/* hwmon callback functions */ +static ssize_t igb_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); +} + +static ssize_t igb_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); + + value = igb_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
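/* The show callbacks above follow the standard hwmon sysfs convention:
 * temp<N>_input / temp<N>_max / temp<N>_crit report millidegrees Celsius
 * and temp<N>_label names the sensor location. A minimal userspace sketch
 * of reading one of these files follows; the sysfs path is a placeholder
 * and will differ per system (the attributes are created on the PCI
 * device, with a hwmon class device pointing back at it).
 */
#include <stdio.h>

int main(void)
{
	/* hypothetical path - resolve the real one via /sys/class/hwmon */
	const char *path = "/sys/class/hwmon/hwmon0/device/temp1_input";
	FILE *f = fopen(path, "r");
	long millideg;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("junction temperature: %ld.%03ld C\n",
	       millideg / 1000, millideg % 1000);
	return 0;
}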
+ */ +static int igb_add_hwmon_attr(struct igb_adapter *adapter, + unsigned int offset, int type) { + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + + n_attr = adapter->igb_hwmon_buff.n_hwmon; + igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_label", offset); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_input", offset); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_max", offset); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_crit", offset); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + igb_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + igb_attr->hw = &adapter->hw; + igb_attr->dev_attr.store = NULL; + igb_attr->dev_attr.attr.mode = S_IRUGO; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); + rc = device_create_file(&adapter->pdev->dev, + &igb_attr->dev_attr); + if (rc == 0) + ++adapter->igb_hwmon_buff.n_hwmon; + + return rc; +} + +static void igb_sysfs_del_adapter(struct igb_adapter *adapter) +{ + int i; + + if (adapter == NULL) + return; + + for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) { + device_remove_file(&adapter->pdev->dev, + &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr); + } + + kfree(adapter->igb_hwmon_buff.hwmon_list); + + if (adapter->igb_hwmon_buff.device) + hwmon_device_unregister(adapter->igb_hwmon_buff.device); +} + +/* called from igb_main.c */ +void igb_sysfs_exit(struct igb_adapter *adapter) +{ + igb_sysfs_del_adapter(adapter); +} + +/* called from igb_main.c */ +int igb_sysfs_init(struct igb_adapter *adapter) +{ + struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff; + unsigned int i; + int n_attrs; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) + goto exit; + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); + if (rc) + goto exit; + + /* Allocation space for max attributes + * max num sensors * values (loc, temp, max, caution) + */ + n_attrs = E1000_MAX_SENSORS * 4; + igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), + GFP_KERNEL); + if (!igb_hwmon->hwmon_list) { + rc = -ENOMEM; + goto err; + } + + igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev); + if (IS_ERR(igb_hwmon->device)) { + rc = PTR_ERR(igb_hwmon->device); + goto err; + } + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + /* Only create hwmon sysfs entries for sensors that have + * meaningful data. 
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 31cfe2ec75df..ed79a1c53b59 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -57,6 +57,7 @@ #ifdef CONFIG_IGB_DCA #include <linux/dca.h> #endif +#include <linux/i2c.h> #include "igb.h" #define MAJ 4 @@ -68,7 +69,8 @@ char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation."; +static const char igb_copyright[] = + "Copyright (c) 2007-2013 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, @@ -193,6 +195,7 @@ static const struct dev_pm_ops igb_pm_ops = { }; #endif static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); #ifdef CONFIG_IGB_DCA static int igb_notify_dca(struct notifier_block *, unsigned long, void *); static struct notifier_block dca_notifier = { @@ -234,6 +237,7 @@ static struct pci_driver igb_driver = { .driver.pm = &igb_pm_ops, #endif .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, .err_handler = &igb_err_handler }; @@ -565,6 +569,91 @@ exit: return; } +/* igb_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + */ +static int igb_get_i2c_data(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return ((i2cctl & E1000_I2C_DATA_IN) != 0); +} + +/* igb_set_i2c_data - Sets the I2C data bit + * @data: pointer to hardware structure + * @state: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + */ +static void igb_set_i2c_data(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) + i2cctl |= E1000_I2C_DATA_OUT; + else + i2cctl &= ~E1000_I2C_DATA_OUT; + + i2cctl &= ~E1000_I2C_DATA_OE_N; + i2cctl |= E1000_I2C_CLK_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); + +} + +/* igb_set_i2c_clk - Sets the I2C SCL clock + * @data: pointer to hardware structure + * @state: state to set clock + * + * Sets the I2C clock line to state + */ +static void igb_set_i2c_clk(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + 
if (state) { + i2cctl |= E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } else { + i2cctl &= ~E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/* igb_get_i2c_clk - Gets the I2C SCL clock state + * @data: pointer to hardware structure + * + * Gets the I2C clock state + */ +static int igb_get_i2c_clk(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return ((i2cctl & E1000_I2C_CLK_IN) != 0); +} + +static const struct i2c_algo_bit_data igb_i2c_algo = { + .setsda = igb_set_i2c_data, + .setscl = igb_set_i2c_clk, + .getsda = igb_get_i2c_data, + .getscl = igb_get_i2c_clk, + .udelay = 5, + .timeout = 20, +}; + /** * igb_get_hw_dev - return device * used by hardware layer to print debugging information @@ -1708,6 +1797,18 @@ void igb_reset(struct igb_adapter *adapter) igb_force_mac_fc(hw); igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif if (!netif_running(adapter->netdev)) igb_power_down_link(adapter); @@ -1822,6 +1923,37 @@ void igb_set_fw_version(struct igb_adapter *adapter) return; } +static const struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", 0Xf8), +}; + +/* igb_init_i2c - Init I2C interface + * @adapter: pointer to adapter structure + * + */ +static s32 igb_init_i2c(struct igb_adapter *adapter) +{ + s32 status = E1000_SUCCESS; + + /* I2C interface supported on i350 devices */ + if (adapter->hw.mac.type != e1000_i350) + return E1000_SUCCESS; + + /* Initialize the i2c bus which is controlled by the registers. + * This bus will use the i2c_algo_bit structue that implements + * the protocol through toggling of the 4 bits in the register. + */ + adapter->i2c_adap.owner = THIS_MODULE; + adapter->i2c_algo = igb_i2c_algo; + adapter->i2c_algo.data = adapter; + adapter->i2c_adap.algo_data = &adapter->i2c_algo; + adapter->i2c_adap.dev.parent = &adapter->pdev->dev; + strlcpy(adapter->i2c_adap.name, "igb BB", + sizeof(adapter->i2c_adap.name)); + status = i2c_bit_add_bus(&adapter->i2c_adap); + return status; +} + /** * igb_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -2022,9 +2154,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error\n"); memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; @@ -2115,6 +2246,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* reset the hardware with the new settings */ igb_reset(adapter); + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } + /* let the f/w know that the h/w is now under the control of the * driver. 
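/* For reference, i2c_algo_bit_data.udelay is the half period of SCL in
 * microseconds, so the udelay = 5 used by igb_i2c_algo above gives a bus
 * clock of roughly 1 / (2 * 5 us) = 100 kHz (standard-mode I2C), while
 * .timeout is a wait limit expressed in jiffies. Tiny sketch of that
 * arithmetic; the constants mirror the structure above rather than being
 * read from hardware.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int udelay_us = 5;	/* half SCL period */
	const unsigned int scl_hz = 1000000 / (2 * udelay_us);

	printf("approximate SCL clock: %u Hz\n", scl_hz);	/* ~100000 */
	return 0;
}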
*/ igb_get_hw_control(adapter); @@ -2135,7 +2273,27 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } #endif +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + /* + * Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif /* do hw tstamp init after resetting */ igb_ptp_init(adapter); @@ -2176,6 +2334,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_register: igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); err_eeprom: if (!igb_check_reset_block(hw)) igb_reset_phy(hw); @@ -2196,6 +2355,111 @@ err_dma: return err; } +#ifdef CONFIG_PCI_IOV +static int igb_disable_sriov(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + /* disable iov and allow time for transactions to clear */ + if (igb_vfs_are_assigned(adapter)) { + dev_warn(&pdev->dev, + "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); + return -EPERM; + } else { + pci_disable_sriov(pdev); + msleep(500); + } + + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&pdev->dev, "IOV Disabled\n"); + + /* Re-enable DMA Coalescing flag since IOV is turned off */ + adapter->flags |= IGB_FLAG_DMAC; + } + + return 0; +} + +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + int old_vfs = pci_num_vf(pdev); + int err = 0; + int i; + + if (!num_vfs) + goto out; + else if (old_vfs && old_vfs == num_vfs) + goto out; + else if (old_vfs && old_vfs != num_vfs) + err = igb_disable_sriov(pdev); + + if (err) + goto out; + + if (num_vfs > 7) { + err = -EPERM; + goto out; + } + + adapter->vfs_allocated_count = num_vfs; + + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), GFP_KERNEL); + + /* if allocation failed then we do not support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + dev_err(&pdev->dev, + "Unable to allocate memory for VF Data Storage\n"); + err = -ENOMEM; + goto out; + } + + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + + dev_info(&pdev->dev, "%d VFs allocated\n", + adapter->vfs_allocated_count); + for (i = 0; i < adapter->vfs_allocated_count; i++) + igb_vf_configure(adapter, i); + + /* DMA Coalescing is not supported in IOV mode. 
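/* igb_enable_sriov()/igb_disable_sriov() above are what the new
 * .sriov_configure PCI driver callback ends up calling; that callback is
 * driven from userspace through the standard sriov_numvfs sysfs attribute
 * rather than the legacy max_vfs module parameter. Minimal sketch of
 * toggling it from userspace - the PCI address below is a placeholder.
 */
#include <stdio.h>

int main(void)
{
	/* hypothetical device address; writing 0 releases the VFs again */
	const char *path = "/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "4\n");	/* request four virtual functions */
	fclose(f);
	return 0;
}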
*/ + adapter->flags &= ~IGB_FLAG_DMAC; + goto out; + +err_out: + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; +out: + return err; +} + +#endif +/* + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure + * + */ +static void igb_remove_i2c(struct igb_adapter *adapter) +{ + + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); +} + /** * igb_remove - Device Removal Routine * @pdev: PCI device information struct @@ -2212,8 +2476,11 @@ static void igb_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; pm_runtime_get_noresume(&pdev->dev); +#ifdef CONFIG_IGB_HWMON + igb_sysfs_exit(adapter); +#endif + igb_remove_i2c(adapter); igb_ptp_stop(adapter); - /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. @@ -2243,23 +2510,7 @@ static void igb_remove(struct pci_dev *pdev) igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV - /* reclaim resources allocated to VFs */ - if (adapter->vf_data) { - /* disable iov and allow time for transactions to clear */ - if (igb_vfs_are_assigned(adapter)) { - dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); - } else { - pci_disable_sriov(pdev); - msleep(500); - } - - kfree(adapter->vf_data); - adapter->vf_data = NULL; - wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); - wrfl(); - msleep(100); - dev_info(&pdev->dev, "IOV Disabled\n"); - } + igb_disable_sriov(pdev); #endif iounmap(hw->hw_addr); @@ -2290,103 +2541,22 @@ static void igb_probe_vfs(struct igb_adapter *adapter) #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - int old_vfs = pci_num_vf(adapter->pdev); - int i; /* Virtualization features not supported on i210 family. */ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; - if (old_vfs) { - dev_info(&pdev->dev, "%d pre-allocated VFs found - override " - "max_vfs setting of %d\n", old_vfs, max_vfs); - adapter->vfs_allocated_count = old_vfs; - } - - if (!adapter->vfs_allocated_count) - return; - - adapter->vf_data = kcalloc(adapter->vfs_allocated_count, - sizeof(struct vf_data_storage), GFP_KERNEL); - - /* if allocation failed then we do not support SR-IOV */ - if (!adapter->vf_data) { - adapter->vfs_allocated_count = 0; - dev_err(&pdev->dev, "Unable to allocate memory for VF " - "Data Storage\n"); - goto out; - } - - if (!old_vfs) { - if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) - goto err_out; - } - dev_info(&pdev->dev, "%d VFs allocated\n", - adapter->vfs_allocated_count); - for (i = 0; i < adapter->vfs_allocated_count; i++) - igb_vf_configure(adapter, i); + igb_enable_sriov(pdev, max_vfs); + pci_sriov_set_totalvfs(pdev, 7); - /* DMA Coalescing is not supported in IOV mode. */ - adapter->flags &= ~IGB_FLAG_DMAC; - goto out; -err_out: - kfree(adapter->vf_data); - adapter->vf_data = NULL; - adapter->vfs_allocated_count = 0; -out: - return; #endif /* CONFIG_PCI_IOV */ } -/** - * igb_sw_init - Initialize general software structures (struct igb_adapter) - * @adapter: board private structure to initialize - * - * igb_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). 
- **/ -static int igb_sw_init(struct igb_adapter *adapter) +static void igb_init_queue_configuration(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; u32 max_rss_queues; - pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); - - /* set default ring sizes */ - adapter->tx_ring_count = IGB_DEFAULT_TXD; - adapter->rx_ring_count = IGB_DEFAULT_RXD; - - /* set default ITR values */ - adapter->rx_itr_setting = IGB_DEFAULT_ITR; - adapter->tx_itr_setting = IGB_DEFAULT_ITR; - - /* set default work limits */ - adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; - - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN; - adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - - spin_lock_init(&adapter->stats64_lock); -#ifdef CONFIG_PCI_IOV - switch (hw->mac.type) { - case e1000_82576: - case e1000_i350: - if (max_vfs > 7) { - dev_warn(&pdev->dev, - "Maximum of 7 VFs per PF, using max\n"); - adapter->vfs_allocated_count = 7; - } else - adapter->vfs_allocated_count = max_vfs; - break; - default: - break; - } -#endif /* CONFIG_PCI_IOV */ - /* Determine the maximum number of RSS queues supported. */ switch (hw->mac.type) { case e1000_i211: @@ -2445,11 +2615,64 @@ static int igb_sw_init(struct igb_adapter *adapter) adapter->flags |= IGB_FLAG_QUEUE_PAIRS; break; } +} + +/** + * igb_sw_init - Initialize general software structures (struct igb_adapter) + * @adapter: board private structure to initialize + * + * igb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->stats64_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + if (adapter->vfs_allocated_count) + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + + igb_init_queue_configuration(adapter); /* Setup and initialize a copy of the hw vlan table array */ - adapter->shadow_vfta = kzalloc(sizeof(u32) * - E1000_VLAN_FILTER_TBL_SIZE, - GFP_ATOMIC); + adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), + GFP_ATOMIC); /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter, true)) { @@ -3131,6 +3354,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, wr32(E1000_RXDCTL(reg_idx), rxdctl); } +static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + struct igb_ring *rx_ring) 
+{ +#define IGB_MAX_BUILD_SKB_SIZE \ + (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \ + (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN)) + + /* set build_skb flag */ + if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE) + set_ring_build_skb_enabled(rx_ring); + else + clear_ring_build_skb_enabled(rx_ring); +} + /** * igb_configure_rx - Configure receive Unit after Reset * @adapter: board private structure @@ -3150,8 +3387,11 @@ static void igb_configure_rx(struct igb_adapter *adapter) /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) - igb_configure_rx_ring(adapter, adapter->rx_ring[i]); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *rx_ring = adapter->rx_ring[i]; + igb_set_rx_buffer_len(adapter, rx_ring); + igb_configure_rx_ring(adapter, rx_ring); + } } /** @@ -3768,6 +4008,7 @@ static void igb_watchdog_task(struct work_struct *work) } igb_spoof_check(adapter); + igb_ptp_rx_hang(adapter); /* Reset the timer */ if (!test_bit(__IGB_DOWN, &adapter->state)) @@ -4193,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring, tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } -/* - * The largest size we can write to the descriptor is 65535. In order to - * maintain a power of two alignment we have to limit ourselves to 32K. - */ -#define IGB_MAX_TXD_PWR 15 -#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) - static void igb_tx_map(struct igb_ring *tx_ring, struct igb_tx_buffer *first, const u8 hdr_len) @@ -4368,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_tx_buffer *first; int tso; u32 tx_flags = 0; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); u8 hdr_len = 0; - /* need: 1 descriptor per page, + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, * + 1 desc for context descriptor, - * otherwise try next time */ - if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) { + * otherwise try next time + */ + if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { + unsigned short f; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + } else { + count += skb_shinfo(skb)->nr_frags; + } + + if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ return NETDEV_TX_BUSY; } @@ -4387,12 +4631,15 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; + skb_tx_timestamp(skb); + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && !(adapter->ptp_tx_skb))) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; if (adapter->hw.mac.type == e1000_82576) schedule_work(&adapter->ptp_tx_work); } @@ -4415,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, igb_tx_map(tx_ring, first, hdr_len); /* Make sure there is space in the ring for the next send. 
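/* Worked example of the descriptor budgeting introduced above, assuming
 * IGB_MAX_DATA_PER_TXD = 1 << 15 = 32768: a 1448-byte linear area costs
 * DIV_ROUND_UP(1448, 32768) = 1 descriptor, while a 64 KiB fragment costs
 * DIV_ROUND_UP(65536, 32768) = 2. The "+ 3" handed to igb_maybe_stop_tx()
 * then covers the context descriptor plus the two-slot gap that keeps
 * tail from touching head. Plain C sketch of the macro arithmetic:
 */
#include <stdio.h>

#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define EX_MAX_DATA_PER_TXD	(1u << 15)
#define EX_TXD_USE_COUNT(s)	EX_DIV_ROUND_UP((s), EX_MAX_DATA_PER_TXD)

int main(void)
{
	printf("1448 bytes  -> %u descriptor(s)\n", EX_TXD_USE_COUNT(1448u));
	printf("65536 bytes -> %u descriptor(s)\n", EX_TXD_USE_COUNT(65536u));
	return 0;
}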
*/ - igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); return NETDEV_TX_OK; @@ -4969,7 +5216,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf) { unsigned char mac_addr[ETH_ALEN]; - eth_random_addr(mac_addr); + eth_zero_addr(mac_addr); igb_set_vf_mac(adapter, vf, mac_addr); return 0; @@ -5322,9 +5569,9 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) { unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - /* generate a new mac address as we were hotplug removed/added */ + /* clear mac address as we were hotplug removed/added */ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) - eth_random_addr(vf_mac); + eth_zero_addr(vf_mac); /* process remaining reset events */ igb_vf_reset(adapter, vf); @@ -5703,7 +5950,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) break; /* prevent any other reads prior to eop_desc */ - rmb(); + read_barrier_depends(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) @@ -5819,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } } +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && - igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) { + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -5870,6 +6118,41 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, DMA_FROM_DEVICE); } +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + struct page *page, + unsigned int truesize) +{ + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IGB_RX_BUFSZ; + + /* since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unnecessary locked operation + */ + atomic_set(&page->_count, 2); +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) + return false; + + /* bump ref count on page before it is given to the stack */ + get_page(page); +#endif + + return true; +} + /** * igb_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -5892,6 +6175,11 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, { struct page *page = rx_buffer->page; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IGB_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { unsigned char *va = page_address(page) + rx_buffer->page_offset; @@ -5914,38 +6202,88 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, IGB_RX_BUFSZ); + rx_buffer->page_offset, size, truesize); - /* avoid re-using remote pages */ - if (unlikely(page_to_nid(page) != numa_node_id())) - return false; + return igb_can_reuse_rx_page(rx_buffer, page, truesize); +} +static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + 
struct igb_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + void *page_addr; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; + unsigned int truesize = IGB_RX_BUFSZ; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(NET_SKB_PAD + + NET_IP_ALIGN + + size); +#endif - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= IGB_RX_BUFSZ; + /* If we spanned a buffer we have a huge mess so test for it */ + BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); - /* - * since we are the only owner of the page and we need to - * increment it, just set the value to 2 in order to avoid - * an unnecessary locked operation - */ - atomic_set(&page->_count, 2); -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += SKB_DATA_ALIGN(size); + /* Guarantee this function can be used by verifying buffer sizes */ + BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD + + NET_IP_ALIGN + + IGB_TS_HDR_LEN + + ETH_FRAME_LEN + + ETH_FCS_LEN)); - if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) - return false; + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); - /* bump ref count on page before it is given to the stack */ - get_page(page); + page_addr = page_address(page) + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN); #endif - return true; + /* build an skb to around the page buffer */ + skb = build_skb(page_addr, truesize); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_failed++; + return NULL; + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* update pointers within the skb to store the data */ + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); + __skb_put(skb, size); + + /* pull timestamp out of packet data */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); + __skb_pull(skb, IGB_TS_HDR_LEN); + } + + if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) { + /* hand second half of page back to the ring */ + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; } static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, @@ -5957,13 +6295,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - /* - * This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * RXD_STAT_DD bit is set - */ - rmb(); - page = rx_buffer->page; prefetchw(page); @@ -6363,8 +6694,17 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) break; + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + 
*/ + rmb(); + /* retrieve a buffer from the ring */ - skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); + if (ring_uses_build_skb(rx_ring)) + skb = igb_build_rx_buffer(rx_ring, rx_desc); + else + skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); /* exit if we failed to retrieve a buffer */ if (!skb) @@ -6451,6 +6791,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; } +static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) +{ + if (ring_uses_build_skb(rx_ring)) + return NET_SKB_PAD + NET_IP_ALIGN; + else + return 0; +} + /** * igb_alloc_rx_buffers - Replace used receive buffers; packet split * @adapter: address of board private structure @@ -6477,7 +6825,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + + bi->page_offset + + igb_rx_offset(rx_ring)); rx_desc++; bi++; @@ -6903,6 +7253,72 @@ static void igb_shutdown(struct pci_dev *pdev) } } +#ifdef CONFIG_PCI_IOV +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_pci_disable_sriov(struct pci_dev *dev) +{ + int err = igb_disable_sriov(dev); + + if (!err) + err = igb_sriov_reinit(dev); + + return err; +} + +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + int err = igb_enable_sriov(dev, num_vfs); + + if (err) + goto out; + + err = igb_sriov_reinit(dev); + if (!err) + return num_vfs; + +out: + return err; +} + +#endif +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return igb_pci_disable_sriov(dev); + else + return igb_pci_enable_sriov(dev, num_vfs); +#endif + return 0; +} + #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs @@ -7308,4 +7724,133 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) } } +static DEFINE_SPINLOCK(i2c_clients_lock); + +/* igb_get_i2c_client - returns matching client + * in adapters's client list. + * @adapter: adapter struct + * @dev_addr: device address of i2c needed. 
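/* The dev_addr handed to the I2C helpers below is the hardware-style
 * 8-bit address with the R/W bit in bit 0, while the Linux I2C core keeps
 * 7-bit client addresses - hence the ">> 1" when the client is created.
 * The i350 thermal diode, for example, is addressed as 0xf8 in the 8-bit
 * form, which the lookup stores as 0xf8 >> 1 = 0x7c. Tiny sketch of the
 * conversion:
 */
#include <stdio.h>

int main(void)
{
	unsigned int dev_addr_8bit = 0xf8;
	unsigned int client_addr_7bit = dev_addr_8bit >> 1;

	printf("8-bit 0x%02x -> 7-bit 0x%02x\n",
	       dev_addr_8bit, client_addr_7bit);	/* 0xf8 -> 0x7c */
	return 0;
}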
+ */ +static struct i2c_client * +igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr) +{ + ulong flags; + struct igb_i2c_client_list *client_list; + struct i2c_client *client = NULL; + struct i2c_board_info client_info = { + I2C_BOARD_INFO("igb", 0x00), + }; + + spin_lock_irqsave(&i2c_clients_lock, flags); + client_list = adapter->i2c_clients; + + /* See if we already have an i2c_client */ + while (client_list) { + if (client_list->client->addr == (dev_addr >> 1)) { + client = client_list->client; + goto exit; + } else { + client_list = client_list->next; + } + } + + /* no client_list found, create a new one */ + client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC); + if (client_list == NULL) + goto exit; + + /* dev_addr passed to us is left-shifted by 1 bit + * i2c_new_device call expects it to be flush to the right. + */ + client_info.addr = dev_addr >> 1; + client_info.platform_data = adapter; + client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info); + if (client_list->client == NULL) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device..\n"); + goto err_no_client; + } + + /* insert new client at head of list */ + client_list->next = adapter->i2c_clients; + adapter->i2c_clients = client_list; + + client = client_list->client; + goto exit; + +err_no_client: + kfree(client_list); +exit: + spin_unlock_irqrestore(&i2c_clients_lock, flags); + return client; +} + +/* igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + */ +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + s32 status; + u16 swfw_mask = 0; + + if (!this_client) + return E1000_ERR_I2C; + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != E1000_SUCCESS) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status < 0) + return E1000_ERR_I2C; + else { + *data = status; + return E1000_SUCCESS; + } +} + +/* igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
+ */ +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + s32 status; + u16 swfw_mask = E1000_SWFW_PHY0_SM; + + if (!this_client) + return E1000_ERR_I2C; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) + return E1000_ERR_SWFW_SYNC; + status = i2c_smbus_write_byte_data(this_client, byte_offset, data); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status) + return E1000_ERR_I2C; + else + return E1000_SUCCESS; + +} /* igb_main.c */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index ab3429729bde..0987822359f0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/pci.h> +#include <linux/ptp_classify.h> #include "igb.h" @@ -70,6 +71,7 @@ */ #define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGB_PTP_TX_TIMEOUT (HZ * 15) #define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) #define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) @@ -396,6 +398,15 @@ void igb_ptp_tx_work(struct work_struct *work) if (!adapter->ptp_tx_skb) return; + if (time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); + return; + } + tsynctxctl = rd32(E1000_TSYNCTXCTL); if (tsynctxctl & E1000_TSYNCTXCTL_VALID) igb_ptp_tx_hwtstamp(adapter); @@ -419,6 +430,51 @@ static void igb_ptp_overflow_check(struct work_struct *work) } /** + * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + */ +void igb_ptp_rx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *rx_ring; + u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); + unsigned long rx_event; + int n; + + if (hw->mac.type != e1000_82576) + return; + + /* If we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); + } +} + +/** * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @adapter: Board private structure. 
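/* Both watchdogs added above lean on the kernel's wrap-safe jiffies
 * helpers: time_is_before_jiffies(x) becomes true once jiffies has
 * advanced past x, so time_is_before_jiffies(ptp_tx_start +
 * IGB_PTP_TX_TIMEOUT) fires roughly 15 seconds (15 * HZ ticks) after the
 * timestamp request, independent of the configured HZ. Minimal sketch of
 * the same pattern; foo_* is a placeholder name, kernel context assumed.
 */
#include <linux/types.h>
#include <linux/jiffies.h>

static bool foo_request_expired(unsigned long start_jiffies)
{
	/* true once more than 15 seconds have elapsed since @start_jiffies */
	return time_is_before_jiffies(start_jiffies + 15 * HZ);
}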
* @@ -643,7 +699,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, else wr32(E1000_ETQF(3), 0); -#define PTP_PORT 319 /* L4 Queue Filter[3]: filter by destination port and protocol */ if (is_l4) { u32 ftqf = (IPPROTO_UDP /* UDP */ @@ -652,12 +707,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, | E1000_FTQF_MASK); /* mask all inputs */ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ - wr32(E1000_IMIR(3), htons(PTP_PORT)); + wr32(E1000_IMIR(3), htons(PTP_EV_PORT)); wr32(E1000_IMIREXT(3), (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); if (hw->mac.type == e1000_82576) { /* enable source port check */ - wr32(E1000_SPQF(3), htons(PTP_PORT)); + wr32(E1000_SPQF(3), htons(PTP_EV_PORT)); ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; } wr32(E1000_FTQF(3), ftqf); @@ -801,6 +856,10 @@ void igb_ptp_stop(struct igb_adapter *adapter) } cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index fdca7b672776..a1463e3d14c0 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -127,8 +127,8 @@ struct igbvf_buffer { /* Tx */ struct { unsigned long time_stamp; + union e1000_adv_tx_desc *next_to_watch; u16 length; - u16 next_to_watch; u16 mapped_as_page; }; /* Rx */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 277f5dfe3d90..d60cd4393415 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) struct sk_buff *skb; union e1000_adv_tx_desc *tx_desc, *eop_desc; unsigned int total_bytes = 0, total_packets = 0; - unsigned int i, eop, count = 0; + unsigned int i, count = 0; bool cleaned = false; i = tx_ring->next_to_clean; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + buffer_info = &tx_ring->buffer_info[i]; + eop_desc = buffer_info->next_to_watch; + + do { + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + buffer_info->next_to_watch = NULL; - while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && - (count < tx_ring->count)) { - rmb(); /* read buffer_info after eop_desc status */ for (cleaned = false; !cleaned; count++) { tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - cleaned = (i == eop); + cleaned = (tx_desc == eop_desc); skb = buffer_info->skb; if (skb) { @@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) i++; if (i == tx_ring->count) i = 0; + + buffer_info = &tx_ring->buffer_info[i]; } - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); - } + + eop_desc = buffer_info->next_to_watch; + } while (count < tx_ring->count); tx_ring->next_to_clean = i; @@ -1399,12 +1412,10 @@ static void igbvf_set_multi(struct net_device *netdev) int i; if (!netdev_mc_empty(netdev)) { - mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); - 
if (!mta_list) { - dev_err(&adapter->pdev->dev, - "failed to allocate multicast filter list\n"); + mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN, + GFP_ATOMIC); + if (!mta_list) return; - } } /* prepare a packed array of only addresses. */ @@ -1738,7 +1749,6 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } @@ -1964,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter, context_desc->seqnum_seed = 0; buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; buffer_info->dma = 0; i++; if (i == tx_ring->count) @@ -2024,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, context_desc->mss_l4len_idx = 0; buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; buffer_info->dma = 0; i++; if (i == tx_ring->count) @@ -2064,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, - struct sk_buff *skb, - unsigned int first) + struct sk_buff *skb) { struct igbvf_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; @@ -2080,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, buffer_info->length = len; /* set time_stamp *before* dma to help avoid a possible race */ buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; buffer_info->mapped_as_page = false; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); @@ -2103,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); buffer_info->length = len; buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; buffer_info->mapped_as_page = true; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE); @@ -2112,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, } tx_ring->buffer_info[i].skb = skb; - tx_ring->buffer_info[first].next_to_watch = i; return ++count; @@ -2123,7 +2127,6 @@ dma_error: buffer_info->dma = 0; buffer_info->time_stamp = 0; buffer_info->length = 0; - buffer_info->next_to_watch = 0; buffer_info->mapped_as_page = false; if (count) count--; @@ -2142,7 +2145,8 @@ dma_error: static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, - int tx_flags, int count, u32 paylen, + int tx_flags, int count, + unsigned int first, u32 paylen, u8 hdr_len) { union e1000_adv_tx_desc *tx_desc = NULL; @@ -2192,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, * such as IA-64). */ wmb(); + tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); /* we need this if more than one processor can write to our tail @@ -2258,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, * count reflects descriptors mapped, if 0 then mapping error * has occurred and we need to rewind the descriptor queue */ - count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); + count = igbvf_tx_map_adv(adapter, tx_ring, skb); if (count) { igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, - skb->len, hdr_len); + first, skb->len, hdr_len); /* Make sure there is space in the ring for the next send. 
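/* The igbvf rework above mirrors what the igb driver already does: the
 * EOP descriptor pointer is only published in next_to_watch after the
 * descriptors are written and wmb() has ordered those stores, and the
 * cleanup path loads the pointer first, then uses read_barrier_depends()
 * before looking at the descriptor's DD bit and clears the pointer to
 * avoid false hang detection. Hedged sketch of that publish/consume
 * pattern with placeholder ex_* types (not the driver's real structures):
 */
#include <linux/types.h>

struct ex_desc {
	__le32 status;
};

struct ex_buffer {
	struct ex_desc *next_to_watch;
};

static void ex_publish_eop(struct ex_buffer *buf, struct ex_desc *eop)
{
	wmb();				/* descriptor writes visible first */
	buf->next_to_watch = eop;
}

static bool ex_eop_done(struct ex_buffer *buf, __le32 dd_bit)
{
	struct ex_desc *eop = buf->next_to_watch;

	if (!eop)			/* nothing pending */
		return false;
	read_barrier_depends();		/* order *eop reads after the load */
	return (eop->status & dd_bit) != 0;
}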
*/ igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); } else { @@ -2736,30 +2741,24 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = hw->mac.ops.reset_hw(hw); if (err) { dev_info(&pdev->dev, - "PF still in reset state, assigning new address." - " Is the PF interface up?\n"); - eth_hw_addr_random(netdev); - memcpy(adapter->hw.mac.addr, netdev->dev_addr, - netdev->addr_len); + "PF still in reset state. Is the PF interface up?\n"); } else { err = hw->mac.ops.read_mac_addr(hw); - if (err) { - dev_err(&pdev->dev, "Error reading MAC address\n"); - goto err_hw_init; - } + if (err) + dev_info(&pdev->dev, "Error reading MAC address.\n"); + else if (is_zero_ether_addr(adapter->hw.mac.addr)) + dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); + netdev->addr_len); } if (!is_valid_ether_addr(netdev->dev_addr)) { - dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", - netdev->dev_addr); - err = -EIO; - goto err_hw_init; + dev_info(&pdev->dev, "Assigning random MAC address.\n"); + eth_hw_addr_random(netdev); + memcpy(adapter->hw.mac.addr, netdev->dev_addr, + netdev->addr_len); } - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, (unsigned long) adapter); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index ae96c10251be..ea4808373435 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -500,9 +500,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; @@ -709,11 +708,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) size = sizeof(struct ixgb_buffer) * txdr->count; txdr->buffer_info = vzalloc(size); - if (!txdr->buffer_info) { - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate transmit descriptor ring memory\n"); + if (!txdr->buffer_info) return -ENOMEM; - } /* round up to nearest 4K */ @@ -798,11 +794,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) size = sizeof(struct ixgb_buffer) * rxdr->count; rxdr->buffer_info = vzalloc(size); - if (!rxdr->buffer_info) { - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate receive descriptor ring\n"); + if (!rxdr->buffer_info) return -ENOMEM; - } /* Round up to nearest 4K */ diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 687c83d1bdab..be2989e60009 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2012 Intel Corporation. +# Copyright(c) 1999 - 2013 Intel Corporation. 
# # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 8e786764c60e..a8e10cff7a89 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -35,6 +35,7 @@ #include <linux/cpumask.h> #include <linux/aer.h> #include <linux/if_vlan.h> +#include <linux/jiffies.h> #include <linux/clocksource.h> #include <linux/net_tstamp.h> @@ -91,21 +92,26 @@ */ #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 -#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) - /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define IXGBE_TX_FLAGS_CSUM (u32)(1) -#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3) -#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4) -#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) -#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) -#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) -#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) -#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9) +enum ixgbe_tx_flags { + /* cmd_type flags */ + IXGBE_TX_FLAGS_HW_VLAN = 0x01, + IXGBE_TX_FLAGS_TSO = 0x02, + IXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IXGBE_TX_FLAGS_CC = 0x08, + IXGBE_TX_FLAGS_IPV4 = 0x10, + IXGBE_TX_FLAGS_CSUM = 0x20, + + /* software defined flags */ + IXGBE_TX_FLAGS_SW_VLAN = 0x40, + IXGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -150,7 +156,7 @@ struct vf_macvlans { /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) -#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ @@ -195,6 +201,7 @@ struct ixgbe_rx_queue_stats { enum ixgbe_ring_state_t { __IXGBE_TX_FDIR_INIT_DONE, + __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, __IXGBE_RX_RSC_ENABLED, @@ -224,6 +231,7 @@ struct ixgbe_ring { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; }; + unsigned long last_rx_timestamp; unsigned long state; u8 __iomem *tail; dma_addr_t dma; /* phys. 
address of descriptor ring */ @@ -271,15 +279,10 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 64 -#ifdef IXGBE_FCOE +#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ #define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#else -#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES -#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES -#endif /* IXGBE_FCOE */ +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ @@ -573,11 +576,14 @@ struct ixgbe_adapter { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; - int rx_hwtstamp_filter; u32 base_incval; /* SR-IOV */ @@ -614,6 +620,7 @@ enum ixgbe_state_t { __IXGBE_DOWN, __IXGBE_SERVICE_SCHED, __IXGBE_IN_SFP_INIT, + __IXGBE_READ_I2C, }; struct ixgbe_cb { @@ -694,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); extern void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); -extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); #endif +extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); extern void ixgbe_do_reset(struct net_device *netdev); #ifdef CONFIG_IXGBE_HWMON @@ -742,15 +749,32 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); -extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb); -extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); +extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); +extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + return; + + __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); + + /* + * Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. 
+ */ + rx_ring->last_rx_timestamp = jiffies; +} + extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, int cmd); extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); +#endif #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 42537336110c..d0113fc97b6f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -41,7 +41,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); @@ -633,15 +632,15 @@ out: * ixgbe_setup_mac_link_82598 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if auto-negotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { + bool autoneg = false; s32 status = 0; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -685,20 +684,18 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true if waiting is needed to complete * * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); @@ -1006,15 +1003,16 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) } /** - * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read + * @dev_addr: address to read from + * @byte_offset: byte offset to read from dev_addr * @eeprom_data: value read * - * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + * Performs 8 byte read operation to SFP module's data over I2C interface. 
**/ -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) +static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, + u8 byte_offset, u8 *eeprom_data) { s32 status = 0; u16 sfp_addr = 0; @@ -1028,7 +1026,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, * 0xC30D. These registers are used to talk to the SFP+ * module's EEPROM through the SDA/SCL (I2C) interface. */ - sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; + sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, @@ -1060,7 +1058,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, *eeprom_data = (u8)(sfp_data >> 8); } else { status = IXGBE_ERR_PHY; - goto out; } out: @@ -1068,6 +1065,36 @@ out: } /** + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + **/ +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, + byte_offset, eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + **/ +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, + byte_offset, sff8472_data); +} + +/** * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type * @hw: pointer to hardware structure * @@ -1300,6 +1327,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .write_reg = &ixgbe_write_phy_reg_generic, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, .check_overtemp = &ixgbe_tn_check_overtemp, }; @@ -1311,4 +1339,3 @@ struct ixgbe_info ixgbe_82598_info = { .eeprom_ops = &eeprom_ops_82598, .phy_ops = &phy_ops_82598, }; - diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 1073aea5da40..203a00c24330 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -45,21 +45,17 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); @@ -234,13 +230,13 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) * ixgbe_get_link_capabilities_82599 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed - * @negotiation: true when autoneg or autotry is enabled + * @autoneg: true when autoneg or autotry is enabled * * Determines the link capabilities by reading the AUTOC register. **/ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *negotiation) + bool *autoneg) { s32 status = 0; u32 autoc = 0; @@ -251,7 +247,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; goto out; } @@ -268,22 +264,22 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = false; + *autoneg = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; - *negotiation = false; + *autoneg = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; break; case IXGBE_AUTOC_LMS_10G_SERIAL: *speed = IXGBE_LINK_SPEED_10GB_FULL; - *negotiation = false; + *autoneg = false; break; case IXGBE_AUTOC_LMS_KX4_KX_KR: @@ -295,7 +291,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; break; case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: @@ -306,12 +302,12 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; break; case IXGBE_AUTOC_LMS_SGMII_1G_100M: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; - *negotiation = false; + *autoneg = false; break; default: @@ -323,7 +319,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; } out: @@ -510,14 +506,12 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * 
@hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { s32 status = 0; @@ -527,11 +521,11 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); u32 i = 0; bool link_up = false; - bool negotiation; + bool autoneg = false; /* Mask off requested but non-supported speeds */ status = hw->mac.ops.get_link_capabilities(hw, &link_speed, - &negotiation); + &autoneg); if (status != 0) return status; @@ -564,7 +558,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, status = ixgbe_setup_mac_link_82599(hw, IXGBE_LINK_SPEED_10GB_FULL, - autoneg, autoneg_wait_to_complete); if (status != 0) return status; @@ -617,7 +610,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, status = ixgbe_setup_mac_link_82599(hw, IXGBE_LINK_SPEED_1GB_FULL, - autoneg, autoneg_wait_to_complete); if (status != 0) return status; @@ -646,7 +638,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, if (speedcnt > 1) status = ixgbe_setup_mac_link_multispeed_fiber(hw, highest_link_speed, - autoneg, autoneg_wait_to_complete); out: @@ -666,13 +657,12 @@ out: * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Implements the Intel SmartSpeed algorithm. **/ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, + ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status = 0; @@ -703,7 +693,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, /* First, try to get link with full advertisement */ hw->phy.smart_speed_active = false; for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != 0) goto out; @@ -738,7 +728,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, /* Turn SmartSpeed on to disable KR support */ hw->phy.smart_speed_active = true; - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != 0) goto out; @@ -764,7 +754,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, /* We didn't get link. Turn SmartSpeed back off. */ hw->phy.smart_speed_active = false; - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); out: @@ -778,14 +768,13 @@ out: * ixgbe_setup_mac_link_82599 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. 
**/ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { s32 status = 0; u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -799,6 +788,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; bool got_lock = false; + bool autoneg = false; /* Check to see if speed passed in is supported. */ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, @@ -911,20 +901,18 @@ out: * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true if waiting is needed to complete * * Restarts link on PHY and MAC based on settings passed in. **/ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); @@ -2253,6 +2241,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = { .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_byte = &ixgbe_read_i2c_byte_generic, .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 5e68afdd502a..99e472ebaa75 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index f7a0970a251c..bc3948ead6e0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 9bc17c0cb972..1f2c805684dd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 1f4108ee154b..1634de8b627f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index 87592b458c9c..ac780770863d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index ba835708fcac..3164f5453b8f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 4eac80d01857..05e23b80b5e3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index 4dec47faeb00..a4ef07631d1e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index f1e002d5fa8f..f3d68f9696ba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -30,6 +30,7 @@ #include <linux/dcbnl.h> #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 @@ -301,7 +302,6 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } -#ifdef IXGBE_FCOE static void ixgbe_dcbnl_devreset(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -320,7 +320,6 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) clear_bit(__IXGBE_RESETTING, &adapter->state); } -#endif static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { @@ -450,7 +449,6 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { @@ -461,14 +459,14 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) *num = adapter->dcb_cfg.num_tcs.pfc_tcs; break; default: - rval = -EINVAL; + return -EINVAL; break; } } else { - rval = -EINVAL; + return -EINVAL; } - return rval; + return 0; } static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) @@ -541,6 +539,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, err = 0; __u8 max_tc = 0; + __u8 map_chg = 0; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; @@ -550,15 +549,22 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, GFP_KERNEL); if (!adapter->ixgbe_ieee_ets) return -ENOMEM; - } - memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + /* initialize UP2TC mappings to invalid value */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + adapter->ixgbe_ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { if (ets->prio_tc[i] > max_tc) max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i]) + map_chg = 1; } + memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + if (max_tc) max_tc++; @@ -567,6 +573,8 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, if (max_tc != netdev_get_num_tc(dev)) err = ixgbe_setup_tc(dev, max_tc); + else if (map_chg) + ixgbe_dcbnl_devreset(dev); if (err) goto err_out; @@ -643,9 +651,11 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, return err; err = dcb_ieee_setapp(dev, app); + if (err) + return err; #ifdef IXGBE_FCOE - if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == 
ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); @@ -656,6 +666,23 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, ixgbe_dcbnl_devreset(dev); } #endif + + /* VF devices should use default UP when available */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0) { + int vf; + + adapter->default_up = app->priority; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + app->priority, vf); + } + } + return 0; } @@ -683,6 +710,24 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, ixgbe_dcbnl_devreset(dev); } #endif + /* IF default priority is being removed clear VF default UP */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0 && adapter->default_up == app->priority) { + int vf; + long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); + int qos = app_mask ? find_first_bit(&app_mask, 8) : 0; + + adapter->default_up = qos; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + qos, vf); + } + } + return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 3504686d3af5..c5933f6dceee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 326858424345..f4d2e9e3c6d5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -39,6 +39,7 @@ #include <linux/uaccess.h> #include "ixgbe.h" +#include "ixgbe_phy.h" #define IXGBE_ALL_RAR_ENTRIES 16 @@ -156,7 +157,7 @@ static int ixgbe_get_settings(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; u32 link_speed = 0; - bool autoneg; + bool autoneg = false; bool link_up; hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); @@ -333,10 +334,10 @@ static int ixgbe_set_settings(struct net_device *netdev, return err; /* this sets the link speed and restarts auto-neg */ hw->mac.autotry_restart = true; - err = hw->mac.ops.setup_link(hw, advertised, true, true); + err = hw->mac.ops.setup_link(hw, advertised, true); if (err) { e_info(probe, "setup link failed with code %d\n", err); - hw->mac.ops.setup_link(hw, old, true, true); + hw->mac.ops.setup_link(hw, old, true); } } else { /* in this case we currently only support 10Gb/FULL */ @@ -1040,6 +1041,9 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, p = (char *) adapter + ixgbe_gstrings_stats[i].stat_offset; break; + default: + data[i] = 0; + continue; } data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == @@ -1096,8 +1100,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: - memcpy(data, *ixgbe_gstrings_test, - IXGBE_TEST_LEN * ETH_GSTRING_LEN); + for (i = 0; i < IXGBE_TEST_LEN; i++) { + memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { @@ -1837,19 +1843,11 @@ static void ixgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; bool if_running = netif_running(netdev); set_bit(__IXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - e_info(hw, "offline testing starting\n"); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ - if (ixgbe_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { int i; for (i = 0; i < adapter->num_vfs; i++) { @@ -1870,12 +1868,24 @@ static void ixgbe_diag_test(struct net_device *netdev, } } + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + if (if_running) /* indicate we're in test mode */ dev_close(netdev); - else - ixgbe_reset(adapter); + /* bringing adapter down disables SFP+ optics */ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); e_info(hw, "register testing starting\n"); if (ixgbe_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1908,16 +1918,22 @@ static void ixgbe_diag_test(struct net_device *netdev, skip_loopback: ixgbe_reset(adapter); + /* clear testing bit and return adapter to previous state */ clear_bit(__IXGBE_TESTING, &adapter->state); if (if_running) dev_open(netdev); } else { e_info(hw, "online testing starting\n"); + + /* if adapter is down, SFP+ optics will be disabled */ + if (!if_running && hw->mac.ops.enable_tx_laser) + 
hw->mac.ops.enable_tx_laser(hw); + /* Online tests */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; - /* Online tests aren't run; pass by default */ + /* Offline tests aren't run; pass by default */ data[0] = 0; data[1] = 0; data[2] = 0; @@ -1925,6 +1941,10 @@ skip_loopback: clear_bit(__IXGBE_TESTING, &adapter->state); } + + /* if adapter was down, ensure SFP+ optics are disabled again */ + if (!if_running && hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); skip_ol_tests: msleep_interruptible(4 * 1000); } @@ -2093,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; int i; - u16 tx_itr_param, rx_itr_param; + u16 tx_itr_param, rx_itr_param, tx_itr_prev; bool need_reset = false; - /* don't accept tx specific changes if we've got mixed RxTx vectors */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count - && ec->tx_coalesce_usecs) - return -EINVAL; + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) @@ -2125,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev, else tx_itr_param = adapter->tx_itr_setting; + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + +#if IS_ENABLED(CONFIG_BQL) + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting > 1) && + (adapter->tx_itr_setting < IXGBE_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev > IXGBE_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev > 1) && + (tx_itr_prev < IXGBE_100K_ITR)) + need_reset = true; + } +#endif /* check the old value and enable RSC if necessary */ - need_reset = ixgbe_update_rsc(adapter); + need_reset |= ixgbe_update_rsc(adapter); for (i = 0; i < adapter->num_q_vectors; i++) { q_vector = adapter->q_vector[i]; @@ -2695,6 +2736,14 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: @@ -2704,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev, return 0; } +static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + max_combined = 4; + } else 
if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = IXGBE_MAX_FDIR_INDICES; + } else { + /* support up to 16 queues with RSS */ + max_combined = IXGBE_MAX_RSS_INDICES; + } + + return max_combined; +} + +static void ixgbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ixgbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + + /* report flow director queues as maximum channels */ + ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; +} + +static int ixgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > ixgbe_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + /* cap RSS limit at 16 */ + if (count > IXGBE_MAX_RSS_INDICES) + count = IXGBE_MAX_RSS_INDICES; + adapter->ring_feature[RING_F_RSS].limit = count; + +#ifdef IXGBE_FCOE + /* cap FCoE limit at 8 */ + if (count > IXGBE_FCRETA_SIZE) + count = IXGBE_FCRETA_SIZE; + adapter->ring_feature[RING_F_FCOE].limit = count; + +#endif + /* use setup TC to update any traffic class queue mapping */ + return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int ixgbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 status; + u8 sff8472_rev, addr_mode; + int ret_val = 0; + bool page_swap = false; + + /* avoid concurent i2c reads */ + while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + msleep(100); + + /* used by the service task */ + set_bit(__IXGBE_READ_I2C, &adapter->state); + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) { + ret_val = -EIO; + goto err_out; + } + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) { + ret_val = -EIO; + goto err_out; + } + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, but not supported. 
Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + +err_out: + clear_bit(__IXGBE_READ_I2C, &adapter->state); + return ret_val; +} + +static int ixgbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u8 databyte = 0xFF; + int i = 0; + int ret_val = 0; + + /* ixgbe_get_module_info is called before this function in all + * cases, so we do not need any checks we already do above, + * and can trust ee->len to be a known value. + */ + + while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + msleep(100); + set_bit(__IXGBE_READ_I2C, &adapter->state); + + /* Read the first block, SFF-8079 */ + for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) { + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + if (status != 0) { + /* Error occured while reading module */ + ret_val = -EIO; + goto err_out; + } + data[i] = databyte; + } + + /* If the second block is requested, check if SFF-8472 is supported. */ + if (ee->len == ETH_MODULE_SFF_8472_LEN) { + if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP) + return -EOPNOTSUPP; + + /* Read the second block, SFF-8472 */ + for (i = ETH_MODULE_SFF_8079_LEN; + i < ETH_MODULE_SFF_8472_LEN; i++) { + status = hw->phy.ops.read_i2c_sff8472(hw, + i - ETH_MODULE_SFF_8079_LEN, &databyte); + if (status != 0) { + /* Error occured while reading module */ + ret_val = -EIO; + goto err_out; + } + data[i] = databyte; + } + } + +err_out: + clear_bit(__IXGBE_READ_I2C, &adapter->state); + + return ret_val; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -2732,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 252850d9a3e0..f58db453a97e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -544,15 +544,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; - first->tx_flags |= IXGBE_TX_FLAGS_FSO; + first->tx_flags |= IXGBE_TX_FLAGS_TSO; } /* set flag indicating FCOE to ixgbe_tx_map call */ - first->tx_flags |= IXGBE_TX_FLAGS_FCOE; + first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; - /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ + /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_transport_offset(skb) + @@ -717,10 +716,8 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) /* Extra buffer to be shared by all DDPs for HW work around */ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); - if (!buffer) { - e_err(drv, "failed to allocate extra DDP buffer\n"); + if (!buffer) return -ENOMEM; - } dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma)) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index bf724da99375..3a02759b5e95 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 8c74f739011d..ef5f7a678ce1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) fcoe = &adapter->ring_feature[RING_F_FCOE]; /* limit ourselves based on feature limits */ - fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); fcoe_i = min_t(u16, fcoe_i, fcoe->limit); if (fcoe_i) { @@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) fcoe_i = min_t(u16, fcoe_i, fcoe->limit); if (vmdq_i > 1 && fcoe_i) { - /* reserve no more than number of CPUs */ - fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); - /* alloc queues for FCoE separately */ fcoe->indices = fcoe_i; fcoe->offset = vmdq_i * rss_i; @@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) if (rss_i > 1 && adapter->atr_sample_rate) { f = &adapter->ring_feature[RING_F_FDIR]; - f->indices = min_t(u16, num_online_cpus(), f->limit); - rss_i = max_t(u16, rss_i, f->indices); + rss_i = f->indices = f->limit; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, { struct ixgbe_q_vector *q_vector; struct ixgbe_ring *ring; - int node = -1; + int node = NUMA_NO_NODE; int cpu = -1; int ring_count, size; + u8 tcs = netdev_get_num_tc(adapter->netdev); ring_count = txr_count + rxr_count; size = sizeof(struct ixgbe_q_vector) + (sizeof(struct ixgbe_ring) * ring_count); /* customize cpu for Flow Director mapping */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - if (cpu_online(v_idx)) { - cpu = v_idx; - node = cpu_to_node(cpu); + if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b3e3294cfe53..68478d6dfa2d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] = #define DRV_VERSION "3.11.33-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2012 Intel Corporation."; + "Copyright (c) 1999-2013 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, @@ -803,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); ixgbe_service_event_schedule(adapter); } } @@ -837,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - rmb(); + read_barrier_depends(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) @@ -850,9 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; - if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) - ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); - /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); @@ -1442,7 +1440,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, rx_desc, skb); - ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); + ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -2181,10 +2179,10 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) return; if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { - u32 autoneg; + u32 speed; bool link_up = false; - hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + hw->mac.ops.check_link(hw, &speed, &link_up, false); if (link_up) return; @@ -2788,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, /* * set WTHRESH to encourage burst writeback, it should not be set - * higher than 1 when ITR is 0 as it could cause false TX hangs + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled * * In order to avoid issues WTHRESH + PTHRESH should always be equal * to or less than the number of on chip descriptors, which is * currently 40. 
*/ +#if IS_ENABLED(CONFIG_BQL) + if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) +#else if (!ring->q_vector || (ring->q_vector->itr < 8)) +#endif txdctl |= (1 << 16); /* WTHRESH = 1 */ else txdctl |= (8 << 16); /* WTHRESH = 8 */ @@ -2815,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ring->atr_sample_rate = 0; } + /* initialize XPS */ + if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct ixgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); /* enable queue */ @@ -3997,25 +4011,25 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) **/ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { - u32 autoneg; - bool negotiation, link_up = false; + u32 speed; + bool autoneg, link_up = false; u32 ret = IXGBE_ERR_LINK_SETUP; if (hw->mac.ops.check_link) - ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); if (ret) goto link_cfg_out; - autoneg = hw->phy.autoneg_advertised; - if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) - ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, - &negotiation); + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) + ret = hw->mac.ops.get_link_capabilities(hw, &speed, + &autoneg); if (ret) goto link_cfg_out; if (hw->mac.ops.setup_link) - ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); + ret = hw->mac.ops.setup_link(hw, speed, link_up); link_cfg_out: return ret; } @@ -4467,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - unsigned int rss; + unsigned int rss, fdir; u32 fwsm; #ifdef CONFIG_IXGBE_DCB int j; @@ -4482,38 +4496,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; - /* Set capability flags */ + /* Set common capability flags and settings */ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + adapter->max_q_vectors = MAX_Q_VECTORS_82599; + adapter->atr_sample_rate = 20; + fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef CONFIG_IXGBE_DCA + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; +#endif +#ifdef IXGBE_FCOE + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif /* CONFIG_IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + + /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; + adapter->max_q_vectors = MAX_Q_VECTORS_82598; + adapter->ring_feature[RING_F_FDIR].limit = 0; + adapter->atr_sample_rate = 0; + adapter->fdir_pballoc = 0; +#ifdef IXGBE_FCOE + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef 
CONFIG_IXGBE_DCB + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + break; + case ixgbe_mac_82599EB: + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_X540: fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - case ixgbe_mac_82599EB: - adapter->max_q_vectors = MAX_Q_VECTORS_82599; - adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - /* Flow Director hash filters enabled */ - adapter->atr_sample_rate = 20; - adapter->ring_feature[RING_F_FDIR].limit = - IXGBE_MAX_FDIR_INDICES; - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; -#ifdef IXGBE_FCOE - adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -#ifdef CONFIG_IXGBE_DCB - /* Default traffic class to use for FCoE */ - adapter->fcoe.up = IXGBE_FCOE_DEFTC; -#endif -#endif /* IXGBE_FCOE */ break; default: break; @@ -4872,7 +4905,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) */ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (adapter->hw.mac.type == ixgbe_mac_82599EB) && - (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -5535,6 +5568,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) break; } + adapter->last_rx_ptp_check = jiffies; + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) ixgbe_ptp_start_cyclecounter(adapter); @@ -5615,6 +5650,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
*/ + e_warn(drv, "initiating reset to clear Tx work after link loss\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; } } @@ -5679,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) return; + /* concurent i2c reads are not supported */ + if (test_bit(__IXGBE_READ_I2C, &adapter->state)) + return; + /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return; @@ -5739,8 +5779,8 @@ sfp_out: static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 autoneg; - bool negotiation; + u32 speed; + bool autoneg = false; if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) return; @@ -5751,11 +5791,11 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - autoneg = hw->phy.autoneg_advertised; - if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) - hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) + hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); if (hw->mac.ops.setup_link) - hw->mac.ops.setup_link(hw, autoneg, negotiation, true); + hw->mac.ops.setup_link(hw, speed, true); adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; @@ -5879,7 +5919,6 @@ static void ixgbe_service_task(struct work_struct *work) struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, service_task); - ixgbe_reset_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); @@ -5887,7 +5926,11 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_watchdog_subtask(adapter); ixgbe_fdir_reinit_subtask(adapter); ixgbe_check_hang_subtask(adapter); - ixgbe_ptp_overflow_check(adapter); + + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) { + ixgbe_ptp_overflow_check(adapter); + ixgbe_ptp_rx_hang(adapter); + } ixgbe_service_event_complete(adapter); } @@ -5900,6 +5943,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -5942,10 +5988,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; - /* mss_l4len_id: use 1 as index for TSO */ + /* mss_l4len_id: use 0 as index for TSO */ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_network_header_len(skb); @@ -5967,12 +6012,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { - if (unlikely(skb->no_fcs)) - first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; - if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) - return; - } + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & IXGBE_TX_FLAGS_CC)) + return; } else { u8 l4_hdr = 0; switch (first->protocol) { @@ -6030,30 +6072,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, type_tucmd, mss_l4len_idx); } -static __le32 ixgbe_tx_cmd_type(u32 tx_flags) +#define IXGBE_SET_FLAG(_input, _flag, 
_result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ - __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_DEXT); + u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; /* set HW vlan bit if vlan is present */ - if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); - - if (tx_flags & IXGBE_TX_FLAGS_TSTAMP) - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP); + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, + IXGBE_ADVTXD_DCMD_VLE); /* set segmentation enable bits for TSO/FSO */ -#ifdef IXGBE_FCOE - if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO)) -#else - if (tx_flags & IXGBE_TX_FLAGS_TSO) -#endif - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, + IXGBE_ADVTXD_DCMD_TSE); + + /* set timestamp bit if present */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, + IXGBE_ADVTXD_MAC_TSTAMP); /* insert frame checksum */ - if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS)) - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS); + cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); return cmd_type; } @@ -6061,36 +6105,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, u32 tx_flags, unsigned int paylen) { - __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); + u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; /* enable L4 checksum for TSO and TX checksum offload */ - if (tx_flags & IXGBE_TX_FLAGS_CSUM) - olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CSUM, + IXGBE_ADVTXD_POPTS_TXSM); /* enble IPv4 checksum for TSO */ - if (tx_flags & IXGBE_TX_FLAGS_IPV4) - olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); - - /* use index 1 context for TSO/FSO/FCOE */ -#ifdef IXGBE_FCOE - if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE)) -#else - if (tx_flags & IXGBE_TX_FLAGS_TSO) -#endif - olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_IPV4, + IXGBE_ADVTXD_POPTS_IXSM); /* * Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running */ -#ifdef IXGBE_FCOE - if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE)) -#else - if (tx_flags & IXGBE_TX_FLAGS_TXSW) -#endif - olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CC, + IXGBE_ADVTXD_CC); - tx_desc->read.olinfo_status = olinfo_status; + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ @@ -6100,22 +6135,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, const u8 hdr_len) { - dma_addr_t dma; struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int data_len = skb->data_len; - unsigned int size = skb_headlen(skb); - unsigned int paylen = skb->len - hdr_len; + struct skb_frag_struct *frag; + dma_addr_t dma; + unsigned int data_len, size; u32 tx_flags = first->tx_flags; - __le32 cmd_type; 
+ u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); u16 i = tx_ring->next_to_use; tx_desc = IXGBE_TX_DESC(tx_ring, i); - ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen); - cmd_type = ixgbe_tx_cmd_type(tx_flags); + ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; #ifdef IXGBE_FCOE if (tx_flags & IXGBE_TX_FLAGS_FCOE) { @@ -6129,19 +6164,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, #endif dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; - /* record length, and DMA address */ - dma_unmap_len_set(first, len, size); - dma_unmap_addr_set(first, dma, dma); + tx_buffer = first; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); - for (;;) { while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = - cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); + cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); i++; tx_desc++; @@ -6149,18 +6187,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, tx_desc = IXGBE_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; dma += IXGBE_MAX_DATA_PER_TXD; size -= IXGBE_MAX_DATA_PER_TXD; tx_desc->read.buffer_addr = cpu_to_le64(dma); - tx_desc->read.olinfo_status = 0; } if (likely(!data_len)) break; - tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); i++; tx_desc++; @@ -6168,6 +6206,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, tx_desc = IXGBE_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; #ifdef IXGBE_FCOE size = min_t(unsigned int, data_len, skb_frag_size(frag)); @@ -6178,22 +6217,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; tx_buffer = &tx_ring->tx_buffer_info[i]; - dma_unmap_len_set(tx_buffer, len, size); - dma_unmap_addr_set(tx_buffer, dma, dma); - - tx_desc->read.buffer_addr = cpu_to_le64(dma); - tx_desc->read.olinfo_status = 0; - - frag++; } /* write last descriptor with RS and EOP bits */ - cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); - tx_desc->read.cmd_type_len = cmd_type; + cmd_type |= size | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); @@ -6354,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } +#ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) { - struct ixgbe_adapter *adapter = netdev_priv(dev); - int txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : - smp_processor_id(); -#ifdef IXGBE_FCOE - __be16 protocol = vlan_get_protocol(skb); + struct ixgbe_adapter *adapter; + struct ixgbe_ring_feature *f; + int txq; - if (((protocol == htons(ETH_P_FCOE)) || - (protocol == htons(ETH_P_FIP))) && - (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { - struct ixgbe_ring_feature *f; + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { + case __constant_htons(ETH_P_FCOE): + case __constant_htons(ETH_P_FIP): + adapter = netdev_priv(dev); - f = &adapter->ring_feature[RING_F_FCOE]; + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + break; + default: + return __netdev_pick_tx(dev, skb); + } - while (txq >= f->indices) - txq -= f->indices; - txq += adapter->ring_feature[RING_F_FCOE].offset; + f = &adapter->ring_feature[RING_F_FCOE]; - return txq; - } -#endif + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : + smp_processor_id(); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - while (unlikely(txq >= dev->real_num_tx_queues)) - txq -= dev->real_num_tx_queues; - return txq; - } + while (txq >= f->indices) + txq -= f->indices; - return skb_tx_hash(dev, skb); + return txq + f->offset; } +#endif netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) @@ -6446,6 +6478,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); } #ifdef CONFIG_PCI_IOV @@ -6454,7 +6491,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, * Tx switch had been disabled. */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - tx_flags |= IXGBE_TX_FLAGS_TXSW; + tx_flags |= IXGBE_TX_FLAGS_CC; #endif /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. 
*/ @@ -6785,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) } } +#endif /* CONFIG_IXGBE_DCB */ /** * ixgbe_setup_tc - configure net_device for multiple traffic classes * @@ -6810,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ixgbe_close(dev); ixgbe_clear_interrupt_scheme(adapter); +#ifdef CONFIG_IXGBE_DCB if (tc) { netdev_set_num_tc(dev, tc); ixgbe_set_prio_tc_map(adapter); @@ -6832,15 +6871,28 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) adapter->dcb_cfg.pfc_mode_enable = false; } - ixgbe_init_interrupt_scheme(adapter); ixgbe_validate_rtr(adapter, tc); + +#endif /* CONFIG_IXGBE_DCB */ + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) - ixgbe_open(dev); + return ixgbe_open(dev); return 0; } -#endif /* CONFIG_IXGBE_DCB */ +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); +} + +#endif void ixgbe_do_reset(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -6986,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, +static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr) { @@ -7063,7 +7115,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, } static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev) + struct net_device *dev, + u32 filter_mask) { struct ixgbe_adapter *adapter = netdev_priv(dev); u16 mode; @@ -7083,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, +#ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, +#endif .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -7195,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; static int cards_found; int i, err, pci_using_dac; + unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; - unsigned int indices = num_possible_cpus(); - unsigned int dcb_max = 0; #ifdef IXGBE_FCOE u16 device_caps; #endif @@ -7246,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_save_state(pdev); + if (ii->mac == ixgbe_mac_82598EB) { #ifdef CONFIG_IXGBE_DCB - if (ii->mac == ixgbe_mac_82598EB) - dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, - IXGBE_MAX_RSS_INDICES); - else - dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, - IXGBE_MAX_FDIR_INDICES); + /* 8 TC w/ 4 queues per TC */ + indices = 4 * MAX_TRAFFIC_CLASS; +#else + indices = IXGBE_MAX_RSS_INDICES; #endif + } - if (ii->mac == ixgbe_mac_82598EB) - indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); - else - indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); - -#ifdef IXGBE_FCOE - indices += min_t(unsigned int, num_possible_cpus(), - IXGBE_MAX_FCOE_INDICES); -#endif - indices = max_t(unsigned int, dcb_max, indices); netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); if (!netdev) { err = -ENOMEM; @@ -7367,7 +7411,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } #ifdef 
CONFIG_PCI_IOV - ixgbe_enable_sriov(adapter, ii); + /* SR-IOV not supported on the 82598 */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + goto skip_sriov; + /* Mailbox */ + ixgbe_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + ixgbe_enable_sriov(adapter); + pci_sriov_set_totalvfs(pdev, 63); +skip_sriov: #endif netdev->features = NETIF_F_SG | @@ -7411,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + unsigned int fcoe_l; + if (hw->mac.ops.get_device_caps) { hw->mac.ops.get_device_caps(hw, &device_caps); if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; } - adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; + + fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); + adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC; @@ -7445,9 +7501,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { e_dev_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; @@ -7624,8 +7679,14 @@ static void ixgbe_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - ixgbe_disable_sriov(adapter); - +#ifdef CONFIG_PCI_IOV + /* + * Only disable SR-IOV on unload if the user specified the now + * deprecated max_vfs module parameter. + */ + if (max_vfs) + ixgbe_disable_sriov(adapter); +#endif ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); @@ -7730,6 +7791,8 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, if (vfdev) { e_dev_err("Issuing VFLR to VF %d\n", vf); pci_write_config_dword(vfdev, 0xA8, 0x00008000); + /* Free device reference count */ + pci_dev_put(vfdev); } pci_cleanup_aer_uncorrect_error_status(pdev); @@ -7839,6 +7902,7 @@ static struct pci_driver ixgbe_driver = { .resume = ixgbe_resume, #endif .shutdown = ixgbe_shutdown, + .sriov_configure = ixgbe_pci_sriov_configure, .err_handler = &ixgbe_err_handler }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 1f3e32b576a5..d4a64e665398 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 42dd65e6ac97..e44ff47659b5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 71659edf81aa..060d2ad2ac96 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -494,11 +494,9 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled **/ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { @@ -854,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, - &identifier); + &identifier); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; /* LAN ID is needed for sfp_type determination */ @@ -872,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, &cable_tech); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; /* ID Module @@ -986,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = identifier; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE0, - &oui_bytes[0]); + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE1, &oui_bytes[1]); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE2, &oui_bytes[2]); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; vendor_oui = @@ -1206,6 +1190,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** + * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's 
SFF-8472 data over I2C + **/ +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} + +/** * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to write @@ -1293,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, break; fail: + ixgbe_i2c_bus_clear(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); msleep(100); - ixgbe_i2c_bus_clear(hw); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read error - Retrying.\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index cc18165b4c05..886a3431cf5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -30,6 +30,7 @@ #include "ixgbe_type.h" #define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 /* EEPROM byte offsets */ #define IXGBE_SFF_IDENTIFIER 0x0 @@ -41,6 +42,8 @@ #define IXGBE_SFF_10GBE_COMP_CODES 0x3 #define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 #define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E /* Bitmasks */ #define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 @@ -51,6 +54,7 @@ #define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 @@ -88,6 +92,9 @@ #define IXGBE_TN_LASI_STATUS_REG 0x9005 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 +/* SFP+ SFF-8472 Compliance code */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); @@ -98,7 +105,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, @@ -126,6 +132,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); #endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index bb9256a1b0a9..331987d6815c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express 
Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -96,15 +96,12 @@ #define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL #define IXGBE_OVERFLOW_PERIOD (HZ * 30) +#define IXGBE_PTP_TX_TIMEOUT (HZ * 15) #ifndef NSECS_PER_SEC #define NSECS_PER_SEC 1000000000ULL #endif -static struct sock_filter ptp_filter[] = { - PTP_FILTER -}; - /** * ixgbe_ptp_setup_sdp * @hw: the hardware private structure @@ -405,149 +402,145 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) } } - /** - * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow - * @work: structure containing information about this work task + * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct * - * this work function is scheduled to continue reading the timecounter + * this watchdog task periodically reads the timecounter * in order to prevent missing when the system time registers wrap - * around. This needs to be run approximately twice a minute when no - * PTP activity is occurring. + * around. This needs to be run approximately twice a minute. */ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { - unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies; + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + IXGBE_OVERFLOW_PERIOD); struct timespec ts; - if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) && - (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) { + if (timeout) { ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); adapter->last_overflow_check = jiffies; } } /** - * ixgbe_ptp_match - determine if this skb matches a ptp packet - * @skb: pointer to the skb - * @hwtstamp: pointer to the hwtstamp_config to check - * - * Determine whether the skb should have been timestamped, assuming the - * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet - * should have a timestamp waiting in the registers, and 0 otherwise. + * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure * - * V1 packets have to check the version type to determine whether they are - * correct. However, we can't directly access the data because it might be - * fragmented in the SKB, in paged memory. In order to work around this, we - * use skb_copy_bits which will properly copy the data whether it is in the - * paged memory fragments or not. We have to copy the IP header as well as the - * message type. + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. 
*/ -static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter) +void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { - struct iphdr iph; - u8 msgtype; - unsigned int type, offset; - - if (rx_filter == HWTSTAMP_FILTER_NONE) - return 0; - - type = sk_run_filter(skb, ptp_filter); - - if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT)) - return type & PTP_CLASS_V2; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring *rx_ring; + u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + unsigned long rx_event; + int n; - /* For the remaining cases actually check message type */ - switch (type) { - case PTP_CLASS_V1_IPV4: - skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph)); - offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL; - break; - case PTP_CLASS_V1_IPV6: - offset = OFF_PTP6 + OFF_PTP_CONTROL; - break; - default: - /* other cases invalid or handled above */ - return 0; + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; } - /* Make sure our buffer is long enough */ - if (skb->len < offset) - return 0; + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } - skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype)); + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5*HZ)) { + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; - switch (rx_filter) { - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG); - break; - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG); - break; - default: - return 0; + e_warn(drv, "clearing RX Timestamp hang"); } } /** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp - * @q_vector: structure containing interrupt and ring information - * @skb: particular skb to send timestamp with + * @adapter: the private adapter struct * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) +static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) { - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; + struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; u64 regval = 0, ns; - u32 tsynctxctl; unsigned long flags; - /* we cannot process timestamps on a ring without a q_vector */ - if (!q_vector || !q_vector->adapter) - return; - - adapter = q_vector->adapter; - hw = &adapter->hw; - - tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; - /* - * if TX timestamp is not valid, exit after clearing the - * timestamp registers - */ - if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID)) - return; - spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_cyc2time(&adapter->tc, regval); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(ns); - skb_tstamp_tx(skb, &shhwtstamps); + 
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; +} + +/** + * ixgbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necesary, because the + * descriptor's "done" bit does not correlate with the timestamp event. + */ +static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, + ptp_tx_work); + struct ixgbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IXGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb */ + if (!adapter->ptp_tx_skb) + return; + + if (timeout) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + e_warn(drv, "clearing Tx Timestamp hang"); + return; + } + + tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) + ixgbe_ptp_tx_hwtstamp(adapter); + else + /* reschedule to keep checking if it's not available yet */ + schedule_work(&adapter->ptp_tx_work); } /** - * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp * @q_vector: structure containing interrupt and ring information - * @rx_desc: the rx descriptor * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { struct ixgbe_adapter *adapter; struct ixgbe_hw *hw; @@ -563,37 +556,17 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, adapter = q_vector->adapter; hw = &adapter->hw; - if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter))) - return; - + /* + * Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - - /* Check if we have a valid timestamp and make sure the skb should - * have been timestamped */ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) return; - /* - * Always read the registers, in order to clear a possible fault - * because of stagnant RX timestamp values for a packet that never - * reached the queue. - */ regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; - /* - * If the timestamp bit is set in the packet's descriptor, we know the - * timestamp belongs to this packet. No other packet can be - * timestamped until the registers for timestamping have been read. - * Therefor only one packet with this bit can be in the queue at a - * time, and the rx timestamp values that were in the registers belong - * to this packet. - * - * If nothing went wrong, then it should have a skb_shared_tx that we - * can turn into a skb_shared_hwtstamps. 
- */ - if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) - return; spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_cyc2time(&adapter->tc, regval); @@ -698,9 +671,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, return 0; } - /* Store filter value for later use */ - adapter->rx_hwtstamp_filter = config.rx_filter; - /* define ethertype filter for timestamping L2 packets */ if (is_l2) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), @@ -902,11 +872,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) return; } - /* initialize the ptp filter */ - if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) - e_dev_warn("ptp_filter_init failed\n"); - spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); @@ -938,6 +905,12 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) ixgbe_ptp_setup_sdp(adapter); + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); adapter->ptp_clock = NULL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 85cddac673ef..d44b4d21268c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -44,50 +44,11 @@ #include "ixgbe_sriov.h" #ifdef CONFIG_PCI_IOV -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii) +static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int num_vf_macvlans, i; struct vf_macvlans *mv_list; - int pre_existing_vfs = 0; - - pre_existing_vfs = pci_num_vf(adapter->pdev); - if (!pre_existing_vfs && !adapter->num_vfs) - return; - - /* If there are pre-existing VFs then we have to force - * use of that many because they were not deleted the last - * time someone removed the PF driver. That would have - * been because they were allocated to guest VMs and can't - * be removed. Go ahead and just re-enable the old amount. - * If the user wants to change the number of VFs they can - * use ethtool while making sure no VFs are allocated to - * guest VMs... i.e. the right way. - */ - if (pre_existing_vfs) { - adapter->num_vfs = pre_existing_vfs; - dev_warn(&adapter->pdev->dev, "Virtual Functions already " - "enabled for this device - Please reload all " - "VF drivers to avoid spoofed packet errors\n"); - } else { - int err; - /* - * The 82599 supports up to 64 VFs per physical function - * but this implementation limits allocation to 63 so that - * basic networking resources are still available to the - * physical function. If the user requests greater thn - * 63 VFs then it is an error - reset to default of zero. 
- */ - adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); - - err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); - if (err) { - e_err(probe, "Failed to enable PCI sriov: %d\n", err); - adapter->num_vfs = 0; - return; - } - } adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); @@ -128,12 +89,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, kcalloc(adapter->num_vfs, sizeof(struct vf_data_storage), GFP_KERNEL); if (adapter->vfinfo) { - /* Now that we're sure SR-IOV is enabled - * and memory allocated set up the mailbox parameters - */ - ixgbe_init_mbx_params_pf(hw); - memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); - /* limit trafffic classes based on VFs enabled */ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (adapter->num_vfs < 16)) { @@ -157,10 +112,62 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, /* enable spoof checking for all VFs */ for (i = 0; i < adapter->num_vfs; i++) adapter->vfinfo[i].spoofchk_enabled = true; + return 0; + } + + return -ENOMEM; +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) return; + + if (!pre_existing_vfs) + dev_warn(&adapter->pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); + } else { + int err; + /* + * The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } } - /* Oh oh */ + if (!__ixgbe_enable_sriov(adapter)) + return; + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. 
+ */ e_err(probe, "Unable to allocate memory for VF Data Storage - " "SRIOV disabled\n"); ixgbe_disable_sriov(adapter); @@ -200,11 +207,12 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) } #endif /* #ifdef CONFIG_PCI_IOV */ -void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 gpie; u32 vmdctl; + int rss; /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; @@ -219,7 +227,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* if SR-IOV is already disabled then there is nothing to do */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return; + return 0; #ifdef CONFIG_PCI_IOV /* @@ -229,7 +237,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) */ if (ixgbe_vfs_are_assigned(adapter)) { e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); - return; + return -EPERM; } /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); @@ -252,10 +260,94 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; adapter->ring_feature[RING_F_VMDQ].offset = 0; + rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_RSS].limit = rss; + /* take a breather then clean up driver data */ msleep(100); adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + return 0; +} + +static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ixgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. 
+ */ + if (num_vfs > 63) { + err = -EPERM; + goto err_out; + } + + adapter->num_vfs = num_vfs; + + err = __ixgbe_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(dev, (i | 0x10000000)); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + ixgbe_sriov_reinit(adapter); + +out: + return num_vfs; + +err_out: + return err; +#endif + return 0; +} + +static int ixgbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = ixgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) { + /* ixgbe_disable_sriov() doesn't clear VMDQ flag */ + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; +#ifdef CONFIG_PCI_IOV + ixgbe_sriov_reinit(adapter); +#endif + } + + return err; +} + +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + if (num_vfs == 0) + return ixgbe_pci_sriov_disable(dev); + else + return ixgbe_pci_sriov_enable(dev, num_vfs); } static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, @@ -447,15 +539,6 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); } -static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, - u16 vid, u16 qos, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; - - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); -} - static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 1be1d30e4e78..4713f9fc7f46 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -41,12 +41,20 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); -void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii); +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); #endif +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, + u16 vid, u16 qos, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); +} #endif /* _IXGBE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 16ddf14e8ba4..d118def16f35 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 9cd8a13711d3..6652e96c352d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -2822,7 +2822,7 @@ struct ixgbe_mac_operations { void (*disable_tx_laser)(struct ixgbe_hw *); void (*enable_tx_laser)(struct ixgbe_hw *); void (*flap_tx_laser)(struct ixgbe_hw *); - s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); @@ -2869,12 +2869,12 @@ struct ixgbe_phy_operations { s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); - s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, - bool); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); s32 (*check_overtemp)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index c73b92993391..66c5e946284e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -72,14 +72,13 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed **/ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - return hw->phy.ops.setup_link_speed(hw, speed, autoneg, + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } @@ -879,6 +878,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = { .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_byte = &ixgbe_read_i2c_byte_generic, .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 8f2070439b59..c9d0c12d6f04 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -99,6 +99,7 @@ static int ixgbevf_get_settings(struct net_device *netdev, ecmd->transceiver = XCVR_DUMMY1; ecmd->port = -1; + hw->mac.get_link_status = 1; hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 257357ae66c3..c3db6cd69b68 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -750,12 +750,37 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) static irqreturn_t ixgbevf_msix_other(int irq, void *data) { struct ixgbevf_adapter *adapter = data; + struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; + u32 msg; + bool got_ack = false; hw->mac.get_link_status = 1; + if (!hw->mbx.ops.check_for_ack(hw)) + got_ack = true; - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies); + if (!hw->mbx.ops.check_for_msg(hw)) { + hw->mbx.ops.read(hw, &msg, 1); + + if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) { + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 1)); + adapter->link_up = false; + } + + if (msg & IXGBE_VT_MSGTYPE_NACK) + dev_info(&pdev->dev, + "Last Request of type %2.2x to PF Nacked\n", + msg & 0xFF); + hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; + } + + /* checking for the ack clears the PFACK bit. 
Place + * it back in the v2p_mailbox cache so that anyone + * polling for an ack will not miss it + */ + if (got_ack) + hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); @@ -2095,6 +2120,9 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; int i; + if (!adapter->link_up) + return; + UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, adapter->stats.vfgprc); UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, @@ -2217,9 +2245,23 @@ static void ixgbevf_watchdog_task(struct work_struct *work) if (link_up) { if (!netif_carrier_ok(netdev)) { - hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", - (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? - 10 : 1); + char *link_speed_string; + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + link_speed_string = "10 Gbps"; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + link_speed_string = "1 Gbps"; + break; + case IXGBE_LINK_SPEED_100_FULL: + link_speed_string = "100 Mbps"; + break; + default: + link_speed_string = "unknown speed"; + break; + } + dev_info(&adapter->pdev->dev, + "NIC Link is Up, %s\n", link_speed_string); netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); } @@ -2227,7 +2269,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work) adapter->link_up = false; adapter->link_speed = 0; if (netif_carrier_ok(netdev)) { - hw_dbg(&adapter->hw, "NIC Link is Down\n"); + dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -3328,8 +3370,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_sw_init; /* The HW MAC address was set and/or determined in sw_init */ - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->dev_addr)) { pr_err("invalid MAC address\n"); err = -EIO; diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index bc58f1dc22f5..5409fe876a44 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -695,9 +695,9 @@ static void netdev_get_drvinfo(struct net_device *dev, { struct korina_private *lp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, lp->dev->name); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index c124e67a1a1c..6a2127489af7 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -302,9 +302,9 @@ ltq_etop_hw_init(struct net_device *dev) static void ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, "Lantiq ETOP"); - strcpy(info->bus_info, "internal"); - strcpy(info->version, DRV_VERSION); + strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver)); + strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static int @@ -393,8 +393,8 @@ ltq_etop_mdio_probe(struct net_device *dev) return -ENODEV; } - phydev = phy_connect(dev, dev_name(&phydev->dev), <q_etop_mdio_link, - 0, priv->pldata->mii_mode); + phydev = phy_connect(dev, dev_name(&phydev->dev), + <q_etop_mdio_link, 
priv->pldata->mii_mode); if (IS_ERR(phydev)) { netdev_err(dev, "Could not attach to PHY\n"); @@ -655,7 +655,7 @@ ltq_etop_init(struct net_device *dev) /* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */ if (random_mac) - dev->addr_assign_type |= NET_ADDR_RANDOM; + dev->addr_assign_type = NET_ADDR_RANDOM; ltq_etop_set_multicast_list(dev); err = ltq_etop_mdio_init(dev); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 84c13263c514..29140502b71a 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1879,12 +1879,10 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) memset(rxq->rx_desc_area, 0, size); rxq->rx_desc_area_size = size; - rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), - GFP_KERNEL); - if (rxq->rx_skb == NULL) { - netdev_err(mp->dev, "can't allocate rx skb ring\n"); + rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb), + GFP_KERNEL); + if (rxq->rx_skb == NULL) goto out_free; - } rx_desc = rxq->rx_desc_area; for (i = 0; i < rxq->rx_ring_size; i++) { @@ -2789,7 +2787,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) phy_reset(mp); - phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); + phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII); if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 74f1c157a480..77b7c80262f4 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -164,7 +164,6 @@ static int orion_mdio_probe(struct platform_device *pdev) bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!bus->irq) { - dev_err(&pdev->dev, "Cannot allocate PHY IRQ array\n"); mdiobus_free(bus); return -ENOMEM; } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 10d678d3dd01..037ed866c22f 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -627,7 +627,6 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; memcpy(oldMac, dev->dev_addr, ETH_ALEN); - dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); netif_addr_lock_bh(dev); update_hash_table_mac_address(pep, oldMac, dev->dev_addr); @@ -1391,7 +1390,7 @@ static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex) struct phy_device *phy = pep->phy; ethernet_phy_reset(pep); - phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII); + phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII); if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; @@ -1444,10 +1443,10 @@ static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static void pxa168_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strncpy(info->driver, DRIVER_NAME, 32); - strncpy(info->version, DRIVER_VERSION, 32); - strncpy(info->fw_version, "N/A", 32); - strncpy(info->bus_info, "N/A", 32); + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); } static const struct ethtool_ops pxa168_ethtool_ops = { diff --git 
a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 5544a1fe2f94..171f4b3dda07 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3855,7 +3855,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, /* read the mac address */ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); return dev; } @@ -3917,10 +3916,9 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* space for skge@pci:0000:04:00.0 */ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + strlen(pci_name(pdev)) + 1, GFP_KERNEL); - if (!hw) { - dev_err(&pdev->dev, "cannot allocate hardware struct\n"); + if (!hw) goto err_out_free_regions; - } + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); hw->pdev = pdev; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 3269eb38cc57..fc07ca35721b 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4801,7 +4801,6 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, /* read the mac address */ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); return dev; } @@ -4970,10 +4969,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + strlen(pci_name(pdev)) + 1, GFP_KERNEL); - if (!hw) { - dev_err(&pdev->dev, "cannot allocate hardware struct\n"); + if (!hw) goto err_out_free_regions; - } hw->pdev = pdev; sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 03447dad07e9..00f25b5f297f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -35,6 +35,8 @@ #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/mlx4/driver.h> +#include <linux/in.h> +#include <net/ip.h> #include "mlx4_en.h" #include "en_port.h" @@ -494,7 +496,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev, mutex_lock(&mdev->state_lock); if (priv->port_up) { port_up = 1; - mlx4_en_stop_port(dev); + mlx4_en_stop_port(dev, 1); } mlx4_en_free_resources(priv); @@ -589,7 +591,7 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev, mutex_lock(&mdev->state_lock); if (priv->port_up) { port_up = 1; - mlx4_en_stop_port(dev); + mlx4_en_stop_port(dev, 1); } priv->prof->rss_rings = rss_rings; @@ -664,27 +666,90 @@ static int mlx4_en_validate_flow(struct net_device *dev, if ((cmd->fs.flow_type & FLOW_EXT)) { if (cmd->fs.m_ext.vlan_etype || - !(cmd->fs.m_ext.vlan_tci == 0 || - cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff))) + !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) == + 0 || + (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) == + cpu_to_be16(VLAN_VID_MASK))) return -EINVAL; + + if (cmd->fs.m_ext.vlan_tci) { + if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + + } } return 0; } +static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h, + struct mlx4_spec_list *spec_l2, + unsigned char *mac) +{ + int err = 0; + __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + + spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN); + 
memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN); + + if ((cmd->fs.flow_type & FLOW_EXT) && + (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { + spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci; + spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK); + } + + list_add_tail(&spec_l2->list, rule_list_h); + + return err; +} + +static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h, + struct mlx4_spec_list *spec_l2, + __be32 ipv4_dst) +{ +#ifdef CONFIG_INET + unsigned char mac[ETH_ALEN]; + + if (!ipv4_is_multicast(ipv4_dst)) { + if (cmd->fs.flow_type & FLOW_MAC_EXT) + memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN); + else + memcpy(&mac, priv->dev->dev_addr, ETH_ALEN); + } else { + ip_eth_mc_map(ipv4_dst, mac); + } + + return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]); +#else + return -EINVAL; +#endif +} + static int add_ip_rule(struct mlx4_en_priv *priv, - struct ethtool_rxnfc *cmd, - struct list_head *list_h) + struct ethtool_rxnfc *cmd, + struct list_head *list_h) { - struct mlx4_spec_list *spec_l3; + int err; + struct mlx4_spec_list *spec_l2 = NULL; + struct mlx4_spec_list *spec_l3 = NULL; struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec; - spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL); - if (!spec_l3) { - en_err(priv, "Fail to alloc ethtool rule.\n"); - return -ENOMEM; + spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + if (!spec_l2 || !spec_l3) { + err = -ENOMEM; + goto free_spec; } + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2, + cmd->fs.h_u. + usr_ip4_spec.ip4dst); + if (err) + goto free_spec; spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src; if (l3_mask->ip4src) @@ -695,34 +760,52 @@ static int add_ip_rule(struct mlx4_en_priv *priv, list_add_tail(&spec_l3->list, list_h); return 0; + +free_spec: + kfree(spec_l2); + kfree(spec_l3); + return err; } static int add_tcp_udp_rule(struct mlx4_en_priv *priv, struct ethtool_rxnfc *cmd, struct list_head *list_h, int proto) { - struct mlx4_spec_list *spec_l3; - struct mlx4_spec_list *spec_l4; + int err; + struct mlx4_spec_list *spec_l2 = NULL; + struct mlx4_spec_list *spec_l3 = NULL; + struct mlx4_spec_list *spec_l4 = NULL; struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec; - spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL); - spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL); - if (!spec_l4 || !spec_l3) { - en_err(priv, "Fail to alloc ethtool rule.\n"); - kfree(spec_l3); - kfree(spec_l4); - return -ENOMEM; + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); + spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL); + if (!spec_l2 || !spec_l3 || !spec_l4) { + err = -ENOMEM; + goto free_spec; } spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; if (proto == TCP_V4_FLOW) { + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, + spec_l2, + cmd->fs.h_u. + tcp_ip4_spec.ip4dst); + if (err) + goto free_spec; spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP; spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src; spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst; spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc; spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst; } else { + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, + spec_l2, + cmd->fs.h_u. 
+ udp_ip4_spec.ip4dst); + if (err) + goto free_spec; spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP; spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src; spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst; @@ -744,6 +827,12 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv, list_add_tail(&spec_l4->list, list_h); return 0; + +free_spec: + kfree(spec_l2); + kfree(spec_l3); + kfree(spec_l4); + return err; } static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev, @@ -751,43 +840,23 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev, struct list_head *rule_list_h) { int err; - __be64 be_mac; struct ethhdr *eth_spec; - struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_spec_list *spec_l2; - __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + struct mlx4_en_priv *priv = netdev_priv(dev); err = mlx4_en_validate_flow(dev, cmd); if (err) return err; - spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL); - if (!spec_l2) - return -ENOMEM; - - if (cmd->fs.flow_type & FLOW_MAC_EXT) { - memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN); - } else { - u64 mac = priv->mac & MLX4_MAC_MASK; - be_mac = cpu_to_be64(mac << 16); - } - - spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH; - memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN); - if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW) - memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN); - - if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) { - spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci; - spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff); - } - - list_add_tail(&spec_l2->list, rule_list_h); - switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { case ETHER_FLOW: + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + if (!spec_l2) + return -ENOMEM; + eth_spec = &cmd->fs.h_u.ether_spec; - memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN); + mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, + &eth_spec->h_dest[0]); spec_l2->eth.ether_type = eth_spec->h_proto; if (eth_spec->h_proto) spec_l2->eth.ether_type_enable = 1; @@ -861,6 +930,7 @@ static int mlx4_en_flow_replace(struct net_device *dev, loc_rule->id = 0; memset(&loc_rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec)); + list_del(&loc_rule->list); } err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); if (err) { @@ -871,6 +941,7 @@ static int mlx4_en_flow_replace(struct net_device *dev, loc_rule->id = reg_id; memcpy(&loc_rule->flow_spec, &cmd->fs, sizeof(struct ethtool_rx_flow_spec)); + list_add_tail(&loc_rule->list, &priv->ethtool_list); out_free_list: list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) { @@ -904,6 +975,7 @@ static int mlx4_en_flow_detach(struct net_device *dev, } rule->id = 0; memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec)); + list_del(&rule->list); out: return err; @@ -952,7 +1024,8 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT || cmd->cmd == ETHTOOL_GRXCLSRULE || cmd->cmd == ETHTOOL_GRXCLSRLALL) && - mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) + (mdev->dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)) return -EINVAL; switch (cmd->cmd) { @@ -988,7 +1061,8 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) + if (mdev->dev->caps.steering_mode != +
MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up) return -EINVAL; switch (cmd->cmd) { @@ -1037,7 +1111,7 @@ static int mlx4_en_set_channels(struct net_device *dev, mutex_lock(&mdev->state_lock); if (priv->port_up) { port_up = 1; - mlx4_en_stop_port(dev); + mlx4_en_stop_port(dev, 1); } mlx4_en_free_resources(priv); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 3a2b8c65642d..e3c3d122979e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv, return i; } +void mlx4_en_update_loopback_state(struct net_device *dev, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED| + MLX4_EN_FLAG_ENABLE_HW_LOOPBACK); + + /* Drop the packet if SRIOV is not enabled + * and not performing the selftest or flb disabled + */ + if (mlx4_is_mfunc(priv->mdev->dev) && + !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback) + priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED; + + /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest + * is requested + */ + if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback) + priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK; +} + static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) { struct mlx4_en_profile *params = &mdev->profile; @@ -191,10 +213,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) printk_once(KERN_INFO "%s", mlx4_en_version); - mdev = kzalloc(sizeof *mdev, GFP_KERNEL); + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) { - dev_err(&dev->pdev->dev, "Device struct alloc failed, " - "aborting.\n"); err = -ENOMEM; goto err_free_res; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 75a3f467bb5b..5088dc5c3d1a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work) .priority = MLX4_DOMAIN_RFS, }; int rc; - __be64 mac; __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); list_add_tail(&spec_eth.list, &rule.list); list_add_tail(&spec_ip.list, &rule.list); list_add_tail(&spec_tcp.list, &rule.list); - mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16); - rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; - memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN); + memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); filter->activated = 0; @@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) return 0; } +static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) +{ + unsigned int i; + for (i = ETH_ALEN - 1; i; --i) { + dst_mac[i] = src_mac & 0xff; + src_mac >>= 8; + } + memset(&dst_mac[ETH_ALEN], 0, 2); +} + +static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, + unsigned char *mac, int *qpn, u64 *reg_id) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int err; + + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = *qpn; + memcpy(&gid[10], mac, ETH_ALEN); + gid[5] = priv->port; + + err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + struct mlx4_spec_list spec_eth = { 
{NULL} }; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_PROMISC_NONE, + .priority = MLX4_DOMAIN_NIC, + }; + + rule.port = priv->port; + rule.qpn = *qpn; + INIT_LIST_HEAD(&rule.list); + + spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); + memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + list_add_tail(&spec_eth.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + break; + } + default: + return -EINVAL; + } + if (err) + en_warn(priv, "Failed Attaching Unicast\n"); + + return err; +} + +static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, + unsigned char *mac, int qpn, u64 reg_id) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = qpn; + memcpy(&gid[10], mac, ETH_ALEN); + gid[5] = priv->port; + + mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + mlx4_flow_detach(dev, reg_id); + break; + } + default: + en_err(priv, "Invalid steering mode.\n"); + } +} + +static int mlx4_en_get_qp(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + struct mlx4_mac_entry *entry; + int index = 0; + int err = 0; + u64 reg_id; + int *qpn = &priv->base_qpn; + u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + + en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", + priv->dev->dev_addr); + index = mlx4_register_mac(dev, priv->port, mac); + if (index < 0) { + err = index; + en_err(priv, "Failed adding MAC: %pM\n", + priv->dev->dev_addr); + return err; + } + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + int base_qpn = mlx4_get_base_qpn(dev, priv->port); + *qpn = base_qpn + index; + return 0; + } + + err = mlx4_qp_reserve_range(dev, 1, 1, qpn); + en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); + if (err) { + en_err(priv, "Failed to reserve qp for mac registration\n"); + goto qp_err; + } + + err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id); + if (err) + goto steer_err; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto alloc_err; + } + memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); + entry->reg_id = reg_id; + + hlist_add_head_rcu(&entry->hlist, + &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); + + return 0; + +alloc_err: + mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); + +steer_err: + mlx4_qp_release_range(dev, *qpn, 1); + +qp_err: + mlx4_unregister_mac(dev, priv->port, mac); + return err; +} + +static void mlx4_en_put_qp(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int qpn = priv->base_qpn; + u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + priv->dev->dev_addr); + mlx4_unregister_mac(dev, priv->port, mac); + + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { + struct mlx4_mac_entry *entry; + struct hlist_node *n, *tmp; + struct hlist_head *bucket; + unsigned int mac_hash; + + mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, + priv->dev->dev_addr)) { +
en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", + priv->port, priv->dev->dev_addr, qpn); + mlx4_en_uc_steer_release(priv, entry->mac, + qpn, entry->reg_id); + mlx4_qp_release_range(dev, qpn, 1); + + hlist_del_rcu(&entry->hlist); + kfree_rcu(entry, rcu); + break; + } + } + } +} + +static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, + unsigned char *new_mac, unsigned char *prev_mac) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int err = 0; + u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac); + + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { + struct hlist_head *bucket; + unsigned int mac_hash; + struct mlx4_mac_entry *entry; + struct hlist_node *n, *tmp; + u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); + + bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; + hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, prev_mac)) { + mlx4_en_uc_steer_release(priv, entry->mac, + qpn, entry->reg_id); + mlx4_unregister_mac(dev, priv->port, + prev_mac_u64); + hlist_del_rcu(&entry->hlist); + synchronize_rcu(); + memcpy(entry->mac, new_mac, ETH_ALEN); + entry->reg_id = 0; + mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; + hlist_add_head_rcu(&entry->hlist, + &priv->mac_hash[mac_hash]); + mlx4_register_mac(dev, priv->port, new_mac_u64); + err = mlx4_en_uc_steer_add(priv, new_mac, + &qpn, + &entry->reg_id); + return err; + } + } + return -EINVAL; + } + + return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); +} + u64 mlx4_en_mac_to_u64(u8 *addr) { u64 mac = 0; @@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); - priv->mac = mlx4_en_mac_to_u64(dev->dev_addr); queue_work(mdev->workqueue, &priv->mac_task); return 0; } @@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work) mutex_lock(&mdev->state_lock); if (priv->port_up) { /* Remove old MAC and insert the new one */ - err = mlx4_replace_mac(mdev->dev, priv->port, - priv->base_qpn, priv->mac); + err = mlx4_en_replace_mac(priv, priv->base_qpn, + priv->dev->dev_addr, priv->prev_mac); if (err) en_err(priv, "Failed changing HW MAC address\n"); + memcpy(priv->prev_mac, priv->dev->dev_addr, + sizeof(priv->prev_mac)); } else - en_dbg(HW, priv, "Port is down while " - "registering mac, exiting...\n"); + en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); mutex_unlock(&mdev->state_lock); } @@ -482,7 +708,6 @@ static void mlx4_en_cache_mclist(struct net_device *dev) netdev_for_each_mc_addr(ha, dev) { tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); if (!tmp) { - en_err(priv, "failed to allocate multicast list\n"); mlx4_en_clear_list(dev); return; } @@ -526,181 +751,153 @@ static void update_mclist_flags(struct mlx4_en_priv *priv, } } if (!found) { - new_mc = kmalloc(sizeof(struct mlx4_en_mc_list), + new_mc = kmemdup(src_tmp, + sizeof(struct mlx4_en_mc_list), GFP_KERNEL); - if (!new_mc) { - en_err(priv, "Failed to allocate current multicast list\n"); + if (!new_mc) return; - } - memcpy(new_mc, src_tmp, - sizeof(struct mlx4_en_mc_list)); + new_mc->action = MCLIST_ADD; list_add_tail(&new_mc->list, dst); } } } -static void mlx4_en_set_multicast(struct net_device *dev) +static void mlx4_en_set_rx_mode(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); if (!priv->port_up) return; - queue_work(priv->mdev->workqueue, &priv->mcast_task); + queue_work(priv->mdev->workqueue, 
&priv->rx_mode_task); } -static void mlx4_en_do_set_multicast(struct work_struct *work) +static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, + struct mlx4_en_dev *mdev) { - struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - mcast_task); - struct mlx4_en_dev *mdev = priv->mdev; - struct net_device *dev = priv->dev; - struct mlx4_en_mc_list *mclist, *tmp; - u64 mcast_addr = 0; - u8 mc_list[16] = {0}; int err = 0; - mutex_lock(&mdev->state_lock); - if (!mdev->device_up) { - en_dbg(HW, priv, "Card is not up, " - "ignoring multicast change.\n"); - goto out; - } - if (!priv->port_up) { - en_dbg(HW, priv, "Port is down, " - "ignoring multicast change.\n"); - goto out; - } - - if (!netif_carrier_ok(dev)) { - if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { - if (priv->port_state.link_state) { - priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; - netif_carrier_on(dev); - en_dbg(LINK, priv, "Link Up\n"); - } - } - } - - /* - * Promsicuous mode: disable all filters - */ - - if (dev->flags & IFF_PROMISC) { - if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { - if (netif_msg_rx_status(priv)) - en_warn(priv, "Entering promiscuous mode\n"); - priv->flags |= MLX4_EN_FLAG_PROMISC; - - /* Enable promiscouos mode */ - switch (mdev->dev->caps.steering_mode) { - case MLX4_STEERING_MODE_DEVICE_MANAGED: - err = mlx4_flow_steer_promisc_add(mdev->dev, - priv->port, - priv->base_qpn, - MLX4_FS_PROMISC_UPLINK); - if (err) - en_err(priv, "Failed enabling promiscuous mode\n"); - priv->flags |= MLX4_EN_FLAG_MC_PROMISC; - break; - - case MLX4_STEERING_MODE_B0: - err = mlx4_unicast_promisc_add(mdev->dev, - priv->base_qpn, - priv->port); - if (err) - en_err(priv, "Failed enabling unicast promiscuous mode\n"); - - /* Add the default qp number as multicast - * promisc - */ - if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { - err = mlx4_multicast_promisc_add(mdev->dev, - priv->base_qpn, - priv->port); - if (err) - en_err(priv, "Failed enabling multicast promiscuous mode\n"); - priv->flags |= MLX4_EN_FLAG_MC_PROMISC; - } - break; - - case MLX4_STEERING_MODE_A0: - err = mlx4_SET_PORT_qpn_calc(mdev->dev, - priv->port, - priv->base_qpn, - 1); - if (err) - en_err(priv, "Failed enabling promiscuous mode\n"); - break; - } - - /* Disable port multicast filter (unconditionally) */ - err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, - 0, MLX4_MCAST_DISABLE); - if (err) - en_err(priv, "Failed disabling " - "multicast filter\n"); - - /* Disable port VLAN filter */ - err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); - if (err) - en_err(priv, "Failed disabling VLAN filter\n"); - } - goto out; - } - - /* - * Not in promiscuous mode - */ - - if (priv->flags & MLX4_EN_FLAG_PROMISC) { + if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { if (netif_msg_rx_status(priv)) - en_warn(priv, "Leaving promiscuous mode\n"); - priv->flags &= ~MLX4_EN_FLAG_PROMISC; + en_warn(priv, "Entering promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_PROMISC; - /* Disable promiscouos mode */ + /* Enable promiscouos mode */ switch (mdev->dev->caps.steering_mode) { case MLX4_STEERING_MODE_DEVICE_MANAGED: - err = mlx4_flow_steer_promisc_remove(mdev->dev, - priv->port, - MLX4_FS_PROMISC_UPLINK); + err = mlx4_flow_steer_promisc_add(mdev->dev, + priv->port, + priv->base_qpn, + MLX4_FS_PROMISC_UPLINK); if (err) - en_err(priv, "Failed disabling promiscuous mode\n"); - priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + en_err(priv, "Failed enabling promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; break; case MLX4_STEERING_MODE_B0: - err = 
mlx4_unicast_promisc_remove(mdev->dev, - priv->base_qpn, - priv->port); + err = mlx4_unicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); if (err) - en_err(priv, "Failed disabling unicast promiscuous mode\n"); - /* Disable Multicast promisc */ - if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { - err = mlx4_multicast_promisc_remove(mdev->dev, - priv->base_qpn, - priv->port); + en_err(priv, "Failed enabling unicast promiscuous mode\n"); + + /* Add the default qp number as multicast + * promisc + */ + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { + err = mlx4_multicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); if (err) - en_err(priv, "Failed disabling multicast promiscuous mode\n"); - priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + en_err(priv, "Failed enabling multicast promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; } break; case MLX4_STEERING_MODE_A0: err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, - priv->base_qpn, 0); + priv->base_qpn, + 1); if (err) - en_err(priv, "Failed disabling promiscuous mode\n"); + en_err(priv, "Failed enabling promiscuous mode\n"); break; } - /* Enable port VLAN filter */ + /* Disable port multicast filter (unconditionally) */ + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_DISABLE); + if (err) + en_err(priv, "Failed disabling multicast filter\n"); + + /* Disable port VLAN filter */ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); if (err) - en_err(priv, "Failed enabling VLAN filter\n"); + en_err(priv, "Failed disabling VLAN filter\n"); } +} + +static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, + struct mlx4_en_dev *mdev) +{ + int err = 0; + + if (netif_msg_rx_status(priv)) + en_warn(priv, "Leaving promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_PROMISC; + + /* Disable promiscouos mode */ + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_PROMISC_UPLINK); + if (err) + en_err(priv, "Failed disabling promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_unicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed disabling unicast promiscuous mode\n"); + /* Disable Multicast promisc */ + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { + err = mlx4_multicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed disabling multicast promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + break; + + case MLX4_STEERING_MODE_A0: + err = mlx4_SET_PORT_qpn_calc(mdev->dev, + priv->port, + priv->base_qpn, 0); + if (err) + en_err(priv, "Failed disabling promiscuous mode\n"); + break; + } + + /* Enable port VLAN filter */ + err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); + if (err) + en_err(priv, "Failed enabling VLAN filter\n"); +} + +static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, + struct net_device *dev, + struct mlx4_en_dev *mdev) +{ + struct mlx4_en_mc_list *mclist, *tmp; + u64 mcast_addr = 0; + u8 mc_list[16] = {0}; + int err = 0; /* Enable/disable the multicast filter according to IFF_ALLMULTI */ if (dev->flags & IFF_ALLMULTI) { @@ -767,9 +964,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) /* Update multicast list - we cache all addresses so they won't * change while HW is updated holding the command semaphor */ - netif_tx_lock_bh(dev); + netif_addr_lock_bh(dev); mlx4_en_cache_mclist(dev); - 
netif_tx_unlock_bh(dev); + netif_addr_unlock_bh(dev); list_for_each_entry(mclist, &priv->mc_list, list) { mcast_addr = mlx4_en_mac_to_u64(mclist->addr); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, @@ -814,6 +1011,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) } } } +} + +static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, + struct net_device *dev, + struct mlx4_en_dev *mdev) +{ + struct netdev_hw_addr *ha; + struct mlx4_mac_entry *entry; + struct hlist_node *n, *tmp; + bool found; + u64 mac; + int err = 0; + struct hlist_head *bucket; + unsigned int i; + int removed = 0; + u32 prev_flags; + + /* Note that we do not need to protect our mac_hash traversal with rcu, + * since all modification code is protected by mdev->state_lock + */ + + /* find what to remove */ + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { + found = false; + netdev_for_each_uc_addr(ha, dev) { + if (ether_addr_equal_64bits(entry->mac, + ha->addr)) { + found = true; + break; + } + } + + /* MAC address of the port is not in uc list */ + if (ether_addr_equal_64bits(entry->mac, dev->dev_addr)) + found = true; + + if (!found) { + mac = mlx4_en_mac_to_u64(entry->mac); + mlx4_en_uc_steer_release(priv, entry->mac, + priv->base_qpn, + entry->reg_id); + mlx4_unregister_mac(mdev->dev, priv->port, mac); + + hlist_del_rcu(&entry->hlist); + kfree_rcu(entry, rcu); + en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", + entry->mac, priv->port); + ++removed; + } + } + } + + /* if we didn't remove anything, there is no use in trying to add + * again once we are in a forced promisc mode state + */ + if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) + return; + + prev_flags = priv->flags; + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; + + /* find what to add */ + netdev_for_each_uc_addr(ha, dev) { + found = false; + bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; + hlist_for_each_entry(entry, n, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, ha->addr)) { + found = true; + break; + } + } + + if (!found) { + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", + ha->addr, priv->port); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } + mac = mlx4_en_mac_to_u64(ha->addr); + memcpy(entry->mac, ha->addr, ETH_ALEN); + err = mlx4_register_mac(mdev->dev, priv->port, mac); + if (err < 0) { + en_err(priv, "Failed registering MAC %pM on port %d: %d\n", + ha->addr, priv->port, err); + kfree(entry); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } + err = mlx4_en_uc_steer_add(priv, ha->addr, + &priv->base_qpn, + &entry->reg_id); + if (err) { + en_err(priv, "Failed adding MAC %pM on port %d: %d\n", + ha->addr, priv->port, err); + mlx4_unregister_mac(mdev->dev, priv->port, mac); + kfree(entry); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } else { + unsigned int mac_hash; + en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", + ha->addr, priv->port); + mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + hlist_add_head_rcu(&entry->hlist, bucket); + } + } + } + + if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { + en_warn(priv, "Forcing promiscuous mode on port:%d\n", + priv->port); + } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { + en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", + priv->port); + } +} + +static void mlx4_en_do_set_rx_mode(struct work_struct 
*work) +{ + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + rx_mode_task); + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + + mutex_lock(&mdev->state_lock); + if (!mdev->device_up) { + en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); + goto out; + } + if (!priv->port_up) { + en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); + goto out; + } + + if (!netif_carrier_ok(dev)) { + if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { + if (priv->port_state.link_state) { + priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; + netif_carrier_on(dev); + en_dbg(LINK, priv, "Link Up\n"); + } + } + } + + if (dev->priv_flags & IFF_UNICAST_FLT) + mlx4_en_do_uc_filter(priv, dev, mdev); + + /* Promsicuous mode: disable all filters */ + if ((dev->flags & IFF_PROMISC) || + (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { + mlx4_en_set_promisc_mode(priv, mdev); + goto out; + } + + /* Not in promiscuous mode */ + if (priv->flags & MLX4_EN_FLAG_PROMISC) + mlx4_en_clear_promisc_mode(priv, mdev); + + mlx4_en_do_multicast(priv, dev, mdev); out: mutex_unlock(&mdev->state_lock); } @@ -876,9 +1237,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) priv->rx_usecs = MLX4_EN_RX_COAL_TIME; priv->tx_frames = MLX4_EN_TX_COAL_PKTS; priv->tx_usecs = MLX4_EN_TX_COAL_TIME; - en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " - "rx_frames:%d rx_usecs:%d\n", - priv->dev->mtu, priv->rx_frames, priv->rx_usecs); + en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", + priv->dev->mtu, priv->rx_frames, priv->rx_usecs); /* Setup cq moderation params */ for (i = 0; i < priv->rx_ring_num; i++) { @@ -959,8 +1319,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) cq->moder_time = moder_time; err = mlx4_en_set_cq_moder(priv, cq); if (err) - en_err(priv, "Failed modifying moderation " - "for cq:%d\n", ring); + en_err(priv, "Failed modifying moderation for cq:%d\n", + ring); } priv->last_moder_packets[ring] = rx_packets; priv->last_moder_bytes[ring] = rx_bytes; @@ -977,12 +1337,12 @@ static void mlx4_en_do_get_stats(struct work_struct *work) struct mlx4_en_dev *mdev = priv->mdev; int err; - err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); - if (err) - en_dbg(HW, priv, "Could not update stats\n"); - mutex_lock(&mdev->state_lock); if (mdev->device_up) { + err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); + if (err) + en_dbg(HW, priv, "Could not update stats\n"); + if (priv->port_up) mlx4_en_auto_moderation(priv); @@ -1039,6 +1399,9 @@ int mlx4_en_start_port(struct net_device *dev) INIT_LIST_HEAD(&priv->mc_list); INIT_LIST_HEAD(&priv->curr_list); + INIT_LIST_HEAD(&priv->ethtool_list); + memset(&priv->ethtool_rules[0], 0, + sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); /* Calculate Rx buf size */ dev->mtu = min(dev->mtu, priv->max_mtu); @@ -1074,8 +1437,7 @@ int mlx4_en_start_port(struct net_device *dev) /* Set qp number */ en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); - err = mlx4_get_eth_qp(mdev->dev, priv->port, - priv->mac, &priv->base_qpn); + err = mlx4_en_get_qp(priv); if (err) { en_err(priv, "Failed getting eth qp\n"); goto cq_err; @@ -1138,8 +1500,8 @@ int mlx4_en_start_port(struct net_device *dev) priv->prof->rx_pause, priv->prof->rx_ppp); if (err) { - en_err(priv, "Failed setting port general configurations " - "for port %d, with error %d\n", priv->port, err); + en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", + 
priv->port, err); goto tx_err; } /* Set default qp number */ @@ -1167,23 +1529,16 @@ int mlx4_en_start_port(struct net_device *dev) /* Must redo promiscuous mode setup. */ priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); - if (mdev->dev->caps.steering_mode == - MLX4_STEERING_MODE_DEVICE_MANAGED) { - mlx4_flow_steer_promisc_remove(mdev->dev, - priv->port, - MLX4_FS_PROMISC_UPLINK); - mlx4_flow_steer_promisc_remove(mdev->dev, - priv->port, - MLX4_FS_PROMISC_ALL_MULTI); - } /* Schedule multicast task to populate multicast list */ - queue_work(mdev->workqueue, &priv->mcast_task); + queue_work(mdev->workqueue, &priv->rx_mode_task); mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); priv->port_up = true; netif_tx_start_all_queues(dev); + netif_device_attach(dev); + return 0; tx_err: @@ -1195,7 +1550,7 @@ tx_err: rss_err: mlx4_en_release_rss_steer(priv); mac_err: - mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); + mlx4_en_put_qp(priv); cq_err: while (rx_index--) mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); @@ -1206,11 +1561,12 @@ cq_err: } -void mlx4_en_stop_port(struct net_device *dev) +void mlx4_en_stop_port(struct net_device *dev, int detach) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_mc_list *mclist, *tmp; + struct ethtool_flow_id *flow, *tmp_flow; int i; u8 mc_list[16] = {0}; @@ -1221,12 +1577,42 @@ void mlx4_en_stop_port(struct net_device *dev) /* Synchronize with tx routine */ netif_tx_lock_bh(dev); + if (detach) + netif_device_detach(dev); netif_tx_stop_all_queues(dev); netif_tx_unlock_bh(dev); + netif_tx_disable(dev); + /* Set port as not active */ priv->port_up = false; + /* Promsicuous mode */ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + priv->flags &= ~(MLX4_EN_FLAG_PROMISC | + MLX4_EN_FLAG_MC_PROMISC); + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_PROMISC_UPLINK); + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_PROMISC_ALL_MULTI); + } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { + priv->flags &= ~MLX4_EN_FLAG_PROMISC; + + /* Disable promiscouos mode */ + mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, + priv->port); + + /* Disable Multicast promisc */ + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { + mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, + priv->port); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + } + /* Detach All multicasts */ memset(&mc_list[10], 0xff, ETH_ALEN); mc_list[5] = priv->port; /* needed for B0 steering support */ @@ -1263,8 +1649,20 @@ void mlx4_en_stop_port(struct net_device *dev) mlx4_en_release_rss_steer(priv); /* Unregister Mac address for the port */ - mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); - mdev->mac_removed[priv->port] = 1; + mlx4_en_put_qp(priv); + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) + mdev->mac_removed[priv->port] = 1; + + /* Remove flow steering rules for the port*/ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + ASSERT_RTNL(); + list_for_each_entry_safe(flow, tmp_flow, + &priv->ethtool_list, list) { + mlx4_flow_detach(mdev->dev, flow->id); + list_del(&flow->list); + } + } /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { @@ -1284,15 +1682,12 @@ static void mlx4_en_restart(struct work_struct *work) watchdog_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; - int i; en_dbg(DRV, priv, "Watchdog task called for port 
%d\n", priv->port); mutex_lock(&mdev->state_lock); if (priv->port_up) { - mlx4_en_stop_port(dev); - for (i = 0; i < priv->tx_ring_num; i++) - netdev_tx_reset_queue(priv->tx_ring[i].tx_queue); + mlx4_en_stop_port(dev, 1); if (mlx4_en_start_port(dev)) en_err(priv, "Failed restarting port %d\n", priv->port); } @@ -1362,7 +1757,7 @@ static int mlx4_en_close(struct net_device *dev) mutex_lock(&mdev->state_lock); - mlx4_en_stop_port(dev); + mlx4_en_stop_port(dev, 0); netif_carrier_off(dev); mutex_unlock(&mdev->state_lock); @@ -1437,9 +1832,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num); if (!priv->dev->rx_cpu_rmap) goto err; - - INIT_LIST_HEAD(&priv->filters); - spin_lock_init(&priv->filters_lock); #endif return 0; @@ -1503,7 +1895,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) * the port */ en_dbg(DRV, priv, "Change MTU called with card down!?\n"); } else { - mlx4_en_stop_port(dev); + mlx4_en_stop_port(dev, 1); err = mlx4_en_start_port(dev); if (err) { en_err(priv, "Failed restarting port:%d\n", @@ -1527,17 +1919,92 @@ static int mlx4_en_set_features(struct net_device *netdev, priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); + mlx4_en_update_loopback_state(netdev, features); + return 0; } +static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_dev *mdev = priv->mdev->dev; + int err; + + if (!mlx4_is_mfunc(mdev)) + return -EOPNOTSUPP; + + /* Hardware does not support aging addresses, allow only + * permanent addresses if ndm_state is given + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + en_info(priv, "Add FDB only supports static addresses\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + else + err = -EINVAL; + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +static int mlx4_en_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_dev *mdev = priv->mdev->dev; + int err; + + if (!mlx4_is_mfunc(mdev)) + return -EOPNOTSUPP; + + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + en_info(priv, "Del FDB only supports static addresses\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + + return err; +} + +static int mlx4_en_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, int idx) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_dev *mdev = priv->mdev->dev; + + if (mlx4_is_mfunc(mdev)) + idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); + + return idx; +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, .ndo_start_xmit = mlx4_en_xmit, .ndo_select_queue = mlx4_en_select_queue, .ndo_get_stats = mlx4_en_get_stats, - .ndo_set_rx_mode = mlx4_en_set_multicast, + .ndo_set_rx_mode = mlx4_en_set_rx_mode, .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, 
.ndo_change_mtu = mlx4_en_change_mtu, @@ -1552,6 +2019,9 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif + .ndo_fdb_add = mlx4_en_fdb_add, + .ndo_fdb_del = mlx4_en_fdb_del, + .ndo_fdb_dump = mlx4_en_fdb_dump, }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -1608,7 +2078,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); - INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); @@ -1618,22 +2088,35 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->dcbnl_ops = &mlx4_en_dcbnl_ops; #endif + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) + INIT_HLIST_HEAD(&priv->mac_hash[i]); + /* Query for default mac and max mtu */ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; - priv->mac = mdev->dev->caps.def_mac[priv->port]; - if (ILLEGAL_MAC(priv->mac)) { - en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n", - priv->port, priv->mac); + + /* Set default MAC */ + dev->addr_len = ETH_ALEN; + mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); + if (!is_valid_ether_addr(dev->dev_addr)) { + en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + priv->port, dev->dev_addr); err = -EINVAL; goto out; } + memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac)); + priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + DS_SIZE * MLX4_EN_MAX_RX_FRAGS); err = mlx4_en_alloc_resources(priv); if (err) goto out; +#ifdef CONFIG_RFS_ACCEL + INIT_LIST_HEAD(&priv->filters); + spin_lock_init(&priv->filters_lock); +#endif + /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); @@ -1653,13 +2136,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); - /* Set defualt MAC */ - dev->addr_len = ETH_ALEN; - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); - dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); - } - /* * Set driver features */ @@ -1679,6 +2155,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, MLX4_STEERING_MODE_DEVICE_MANAGED) dev->hw_features |= NETIF_F_NTUPLE; + if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) + dev->priv_flags |= IFF_UNICAST_FLT; + mdev->pndev[port] = dev; netif_carrier_off(dev); @@ -1692,6 +2171,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + /* Configure port */ mlx4_en_calc_rx_buf(dev); err = mlx4_SET_PORT_general(mdev->dev, priv->port, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index fed26d867f4e..ce38654bbdd0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud unsigned int length; int polled = 0; int ip_summed; - struct ethhdr *ethh; - dma_addr_t dma; - u64 s_mac; int factor = priv->cqe_factor; 
if (!priv->port_up) @@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud goto next; } - /* Get pointer to first fragment since we haven't skb yet and - * cast it to ethhdr struct */ - dma = be64_to_cpu(rx_desc->data[0].addr); - dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), - DMA_FROM_DEVICE); - ethh = (struct ethhdr *)(page_address(frags[0].page) + - frags[0].offset); - s_mac = mlx4_en_mac_to_u64(ethh->h_source); - - /* If source MAC is equal to our own MAC and not performing - * the selftest or flb disabled - drop the packet */ - if (s_mac == priv->mac && - !((dev->features & NETIF_F_LOOPBACK) || - priv->validate_loopback)) - goto next; + /* Check if we need to drop the packet if SRIOV is not enabled + * and not performing the selftest or flb disabled + */ + if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) { + struct ethhdr *ethh; + dma_addr_t dma; + /* Get pointer to first fragment since we haven't + * skb yet and cast it to ethhdr struct + */ + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), + DMA_FROM_DEVICE); + ethh = (struct ethhdr *)(page_address(frags[0].page) + + frags[0].offset); + + if (is_multicast_ether_addr(ethh->h_dest)) { + struct mlx4_mac_entry *entry; + struct hlist_node *n; + struct hlist_head *bucket; + unsigned int mac_hash; + + /* Drop the packet, since HW loopback-ed it */ + mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, n, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, + ethh->h_source)) { + rcu_read_unlock(); + goto next; + } + } + rcu_read_unlock(); + } + } /* * Packet is OK - process it. @@ -835,11 +852,9 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, struct mlx4_qp_context *context; int err = 0; - context = kmalloc(sizeof *context , GFP_KERNEL); - if (!context) { - en_err(priv, "Failed to allocate qp context\n"); + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (!context) return -ENOMEM; - } err = mlx4_qp_alloc(mdev->dev, qpn, qp); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index bf2e5d3f177c..3488c6d9e6b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) priv->loopback_ok = 0; priv->validate_loopback = 1; + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + /* xmit */ if (mlx4_en_test_loopback_xmit(priv)) { en_err(priv, "Transmitting loopback packet failed\n"); @@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) mlx4_en_test_loopback_exit: priv->validate_loopback = 0; + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); return !loopback_ok; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 6771b69f40d5..49308cc65ee7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -294,6 +294,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) cnt++; } + netdev_tx_reset_queue(ring->tx_queue); + if (cnt) en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); @@ -515,10 +517,6 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk wmb(); inl->byte_count = cpu_to_be32(1 << 31 | 
(skb->len - spc)); } - tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - (!!vlan_tx_tag_present(skb)); - tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; } u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) @@ -592,7 +590,21 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(ring->tx_queue); priv->port_stats.queue_stopped++; - return NETDEV_TX_BUSY; + /* If queue was emptied after the if, and before the + * stop_queue - need to wake the queue, or else it will remain + * stopped forever. + * Need a memory barrier to make sure ring->cons was not + * updated before queue was stopped. + */ + wmb(); + + if (unlikely(((int)(ring->prod - ring->cons)) <= + ring->size - HEADROOM - MAX_DESC_TXBBS)) { + netif_tx_wake_queue(ring->tx_queue); + priv->port_stats.wake_queue++; + } else { + return NETDEV_TX_BUSY; + } } /* Track current inflight packets for performance analysis */ @@ -630,7 +642,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->tx_csum++; } - if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) { + if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) { /* Copy dst mac address to wqe. This allows loopback in eSwitch, * so that VFs and PF can communicate with each other */ diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 8b3d0512a46b..38b62c78d5da 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -127,7 +127,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [0] = "RSS support", [1] = "RSS Toeplitz Hash Function support", [2] = "RSS XOR Hash Function support", - [3] = "Device manage flow steering support" + [3] = "Device manage flow steering support", + [4] = "Automatic mac reassignment support" }; int i; @@ -478,6 +479,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 +#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -637,6 +639,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) QUERY_DEV_CAP_BMME_FLAGS_OFFSET); MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); + if (field & 1<<6) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN; MLX4_GET(dev_cap->max_icm_sz, outbox, QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) @@ -1287,14 +1292,14 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) /* Enable Ethernet flow steering * with udp unicast and tcp unicast */ - MLX4_PUT(inbox, param->fs_hash_enable_bits, + MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), INIT_HCA_FS_ETH_BITS_OFFSET); MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); /* Enable IPoIB flow steering * with udp unicast and tcp unicast */ - MLX4_PUT(inbox, param->fs_hash_enable_bits, + MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), INIT_HCA_FS_IB_BITS_OFFSET); MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index dbf2f69cc59f..3af33ff669cc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h 
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -171,7 +171,6 @@ struct mlx4_init_hca_param { u8 log_mpt_sz; u8 log_uar_sz; u8 uar_page_sz; /* log pg sz in 4k chunks */ - u8 fs_hash_enable_bits; u8 steering_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5163af314990..b9dde139dac5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1415,22 +1415,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev) if (mlx4_is_master(dev)) mlx4_parav_master_pf_caps(dev); - priv->fs_hash_mode = MLX4_FS_L2_HASH; - - switch (priv->fs_hash_mode) { - case MLX4_FS_L2_HASH: - init_hca.fs_hash_enable_bits = 0; - break; - - case MLX4_FS_L2_L3_L4_HASH: - /* Enable flow steering with - * udp unicast and tcp unicast - */ - init_hca.fs_hash_enable_bits = - MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN; - break; - } - profile = default_profile; if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) @@ -1849,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->dev = dev; info->port = port; if (!mlx4_is_slave(dev)) { - INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL); mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); - info->base_qpn = - dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + - (port - 1) * (1 << log_num_mac); + info->base_qpn = mlx4_get_base_qpn(dev, port); } sprintf(info->dev_name, "mlx4_port%d", port); @@ -2070,10 +2051,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) /* Allow large DMA segments, up to the firmware limit of 1 GB */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); - priv = kzalloc(sizeof *priv, GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { - dev_err(&pdev->dev, "Device struct alloc failed, " - "aborting.\n"); err = -ENOMEM; goto err_release_regions; } @@ -2162,7 +2141,8 @@ slave_start: dev->num_slaves = MLX4_MAX_NUM_SLAVES; else { dev->num_slaves = 0; - if (mlx4_multi_func_init(dev)) { + err = mlx4_multi_func_init(dev); + if (err) { mlx4_err(dev, "Failed to init slave mfunc" " interface, aborting.\n"); goto err_cmd; @@ -2186,7 +2166,8 @@ slave_start: /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ if (mlx4_is_master(dev)) { - if (mlx4_multi_func_init(dev)) { + err = mlx4_multi_func_init(dev); + if (err) { mlx4_err(dev, "Failed to init master mfunc" "interface, aborting.\n"); goto err_close; @@ -2203,6 +2184,7 @@ slave_start: mlx4_enable_msi_x(dev); if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { + err = -ENOSYS; mlx4_err(dev, "INTx is not supported in multi-function mode." 
" aborting.\n"); goto err_free_eq; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1ee4db3c6400..52685524708d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -664,7 +664,7 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, dw |= ctrl->priority << 16; hw->ctrl = cpu_to_be32(dw); - hw->vf_vep_port = cpu_to_be32(ctrl->port); + hw->port = ctrl->port; hw->qpn = cpu_to_be32(ctrl->qpn); } @@ -1157,7 +1157,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], .priority = MLX4_DOMAIN_NIC, }; - rule.allow_loopback = ~block_mcast_loopback; + rule.allow_loopback = !block_mcast_loopback; rule.port = port; rule.qpn = qp->qpn; INIT_LIST_HEAD(&rule.list); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 116c5c29d2d1..ed4a6959e828 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -60,11 +60,6 @@ #define MLX4_FS_MGM_LOG_ENTRY_SIZE 7 #define MLX4_FS_NUM_MCG (1 << 17) -enum { - MLX4_FS_L2_HASH = 0, - MLX4_FS_L2_L3_L4_HASH, -}; - #define MLX4_NUM_UP 8 #define MLX4_NUM_TC 8 #define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */ @@ -658,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context { __be32 mcast; }; -struct mlx4_mac_entry { - u64 mac; - u64 reg_id; -}; - struct mlx4_port_info { struct mlx4_dev *dev; int port; @@ -672,7 +662,6 @@ struct mlx4_port_info { char dev_mtu_name[16]; struct device_attribute port_mtu_attr; struct mlx4_mac_table mac_table; - struct radix_tree_root mac_tree; struct mlx4_vlan_table vlan_table; int base_qpn; }; @@ -696,9 +685,12 @@ struct mlx4_steer { struct mlx4_net_trans_rule_hw_ctrl { __be32 ctrl; - __be32 vf_vep_port; + u8 rsvd1; + u8 funcid; + u8 vep; + u8 port; __be32 qpn; - __be32 reserved; + __be32 rsvd2; }; struct mlx4_net_trans_rule_hw_ib { @@ -918,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); -int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list); int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 8d54412ada63..c313d7e943a9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -198,7 +198,6 @@ enum cq_type { */ #define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x)) #define XNOR(x, y) (!(x) == !(y)) -#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0) struct mlx4_en_tx_info { @@ -427,10 +426,26 @@ struct mlx4_en_frag_info { #endif struct ethtool_flow_id { + struct list_head list; struct ethtool_rx_flow_spec flow_spec; u64 id; }; +enum { + MLX4_EN_FLAG_PROMISC = (1 << 0), + MLX4_EN_FLAG_MC_PROMISC = (1 << 1), + /* whether we need to enable hardware loopback by putting dmac + * in Tx WQE + */ + MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2), + /* whether we need to drop packets that hardware loopback-ed */ + MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), + MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4) +}; + +#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE) +#define 
MLX4_EN_MAC_HASH_IDX 5 + struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -441,6 +456,8 @@ struct mlx4_en_priv { struct mlx4_en_port_state port_state; spinlock_t stats_lock; struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; + /* To allow rules removal while port is going down */ + struct list_head ethtool_list; unsigned long last_moder_packets[MAX_RX_RINGS]; unsigned long last_moder_tx_packets; @@ -469,7 +486,7 @@ struct mlx4_en_priv { int registered; int allocated; int stride; - u64 mac; + unsigned char prev_mac[ETH_ALEN + 2]; int mac_index; unsigned max_mtu; int base_qpn; @@ -478,8 +495,6 @@ struct mlx4_en_priv { struct mlx4_en_rss_map rss_map; __be32 ctrl_flags; u32 flags; -#define MLX4_EN_FLAG_PROMISC 0x1 -#define MLX4_EN_FLAG_MC_PROMISC 0x2 u8 num_tx_rings_p_up; u32 tx_ring_num; u32 rx_ring_num; @@ -493,7 +508,7 @@ struct mlx4_en_priv { struct mlx4_en_cq *tx_cq; struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; - struct work_struct mcast_task; + struct work_struct rx_mode_task; struct work_struct mac_task; struct work_struct watchdog_task; struct work_struct linkstate_task; @@ -510,6 +525,7 @@ struct mlx4_en_priv { bool wol; struct device *ddev; int base_tx_qpn; + struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; #ifdef CONFIG_MLX4_EN_DCB struct ieee_ets ets; @@ -529,14 +545,24 @@ enum mlx4_en_wol { MLX4_EN_WOL_ENABLED = (1ULL << 62), }; +struct mlx4_mac_entry { + struct hlist_node hlist; + unsigned char mac[ETH_ALEN + 2]; + u64 reg_id; + struct rcu_head rcu; +}; + #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) +void mlx4_en_update_loopback_state(struct net_device *dev, + netdev_features_t features); + void mlx4_en_destroy_netdev(struct net_device *dev); int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof); int mlx4_en_start_port(struct net_device *dev); -void mlx4_en_stop_port(struct net_device *dev); +void mlx4_en_stop_port(struct net_device *dev, int detach); void mlx4_en_free_resources(struct mlx4_en_priv *priv); int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 4c51b05efa28..719ead15e491 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) table->total = 0; } -static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, - u64 mac, int *qpn, u64 *reg_id) -{ - __be64 be_mac; - int err; - - mac &= MLX4_MAC_MASK; - be_mac = cpu_to_be64(mac << 16); - - switch (dev->caps.steering_mode) { - case MLX4_STEERING_MODE_B0: { - struct mlx4_qp qp; - u8 gid[16] = {0}; - - qp.qpn = *qpn; - memcpy(&gid[10], &be_mac, ETH_ALEN); - gid[5] = port; - - err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); - break; - } - case MLX4_STEERING_MODE_DEVICE_MANAGED: { - struct mlx4_spec_list spec_eth = { {NULL} }; - __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); - - struct mlx4_net_trans_rule rule = { - .queue_mode = MLX4_NET_TRANS_Q_FIFO, - .exclusive = 0, - .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, - .priority = MLX4_DOMAIN_NIC, - }; - - rule.port = port; - rule.qpn = *qpn; - INIT_LIST_HEAD(&rule.list); - - spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; - memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN); - memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); - list_add_tail(&spec_eth.list, &rule.list); - - err = 
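/*
 * Illustrative sketch, not taken from the patch: the mlx4_en.h hunk above
 * replaces the per-port radix tree of MAC entries with a small hash table
 * (MLX4_EN_MAC_HASH_SIZE buckets) keyed on a single byte of the address
 * (MLX4_EN_MAC_HASH_IDX) and RCU-protected entries.  A simplified version of
 * that lookup/insert scheme is shown below; the helper names are hypothetical
 * and the loop assumes the current three-argument hlist_for_each_entry_rcu().
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN];
	struct rcu_head rcu;
};

#define MAC_HASH_SIZE	(1 << BITS_PER_BYTE)
#define MAC_HASH_IDX	5	/* hash on the last byte of the MAC */

/* lookup: callers hold rcu_read_lock() */
static struct mac_entry *mac_hash_find(struct hlist_head *hash,
				       const unsigned char *mac)
{
	struct hlist_head *bucket = &hash[mac[MAC_HASH_IDX]];
	struct mac_entry *entry;

	hlist_for_each_entry_rcu(entry, bucket, hlist)
		if (ether_addr_equal(entry->mac, mac))
			return entry;
	return NULL;
}

/* insert/delete: callers serialize writers with their own lock */
static int mac_hash_add(struct hlist_head *hash, const unsigned char *mac)
{
	struct mac_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;
	memcpy(entry->mac, mac, ETH_ALEN);
	hlist_add_head_rcu(&entry->hlist, &hash[mac[MAC_HASH_IDX]]);
	return 0;
}

static void mac_hash_del(struct mac_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	kfree_rcu(entry, rcu);	/* readers may still hold a reference */
}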
mlx4_flow_attach(dev, &rule, reg_id); - break; - } - default: - return -EINVAL; - } - if (err) - mlx4_warn(dev, "Failed Attaching Unicast\n"); - - return err; -} - -static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, - u64 mac, int qpn, u64 reg_id) -{ - switch (dev->caps.steering_mode) { - case MLX4_STEERING_MODE_B0: { - struct mlx4_qp qp; - u8 gid[16] = {0}; - __be64 be_mac; - - qp.qpn = qpn; - mac &= MLX4_MAC_MASK; - be_mac = cpu_to_be64(mac << 16); - memcpy(&gid[10], &be_mac, ETH_ALEN); - gid[5] = port; - - mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); - break; - } - case MLX4_STEERING_MODE_DEVICE_MANAGED: { - mlx4_flow_detach(dev, reg_id); - break; - } - default: - mlx4_err(dev, "Invalid steering mode.\n"); - } -} - static int validate_index(struct mlx4_dev *dev, struct mlx4_mac_table *table, int index) { @@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev, return -EINVAL; } -int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) -{ - struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - struct mlx4_mac_entry *entry; - int index = 0; - int err = 0; - u64 reg_id; - - mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n", - (unsigned long long) mac); - index = mlx4_register_mac(dev, port, mac); - if (index < 0) { - err = index; - mlx4_err(dev, "Failed adding MAC: 0x%llx\n", - (unsigned long long) mac); - return err; - } - - if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { - *qpn = info->base_qpn + index; - return 0; - } - - err = mlx4_qp_reserve_range(dev, 1, 1, qpn); - mlx4_dbg(dev, "Reserved qp %d\n", *qpn); - if (err) { - mlx4_err(dev, "Failed to reserve qp for mac registration\n"); - goto qp_err; - } - - err = mlx4_uc_steer_add(dev, port, mac, qpn, ®_id); - if (err) - goto steer_err; - - entry = kmalloc(sizeof *entry, GFP_KERNEL); - if (!entry) { - err = -ENOMEM; - goto alloc_err; - } - entry->mac = mac; - entry->reg_id = reg_id; - err = radix_tree_insert(&info->mac_tree, *qpn, entry); - if (err) - goto insert_err; - return 0; - -insert_err: - kfree(entry); - -alloc_err: - mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id); - -steer_err: - mlx4_qp_release_range(dev, *qpn, 1); - -qp_err: - mlx4_unregister_mac(dev, port, mac); - return err; -} -EXPORT_SYMBOL_GPL(mlx4_get_eth_qp); - -void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn) -{ - struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - struct mlx4_mac_entry *entry; - - mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n", - (unsigned long long) mac); - mlx4_unregister_mac(dev, port, mac); - - if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { - entry = radix_tree_lookup(&info->mac_tree, qpn); - if (entry) { - mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx," - " qpn %d\n", port, - (unsigned long long) mac, qpn); - mlx4_uc_steer_release(dev, port, entry->mac, - qpn, entry->reg_id); - mlx4_qp_release_range(dev, qpn, 1); - radix_tree_delete(&info->mac_tree, qpn); - kfree(entry); - } - } -} -EXPORT_SYMBOL_GPL(mlx4_put_eth_qp); - static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, __be64 *entries) { @@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) } EXPORT_SYMBOL_GPL(mlx4_register_mac); +int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port) +{ + return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + + (port - 1) * (1 << dev->caps.log_num_macs); +} +EXPORT_SYMBOL_GPL(mlx4_get_base_qpn); void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) { @@ -397,29 +236,13 @@ void 
mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) } EXPORT_SYMBOL_GPL(mlx4_unregister_mac); -int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) +int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_mac_table *table = &info->mac_table; - struct mlx4_mac_entry *entry; int index = qpn - info->base_qpn; int err = 0; - if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { - entry = radix_tree_lookup(&info->mac_tree, qpn); - if (!entry) - return -EINVAL; - mlx4_uc_steer_release(dev, port, entry->mac, - qpn, entry->reg_id); - mlx4_unregister_mac(dev, port, entry->mac); - entry->mac = new_mac; - entry->reg_id = 0; - mlx4_register_mac(dev, port, new_mac); - err = mlx4_uc_steer_add(dev, port, entry->mac, - &qpn, &entry->reg_id); - return err; - } - /* CX1 doesn't support multi-functions */ mutex_lock(&table->mutex); @@ -439,7 +262,7 @@ out: mutex_unlock(&table->mutex); return err; } -EXPORT_SYMBOL_GPL(mlx4_replace_mac); +EXPORT_SYMBOL_GPL(__mlx4_replace_mac); static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, __be32 *entries) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 561ed2a22a17..5997adc943d0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3018,7 +3018,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave, __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; - port = be32_to_cpu(ctrl->vf_vep_port) & 0xff; + port = ctrl->port; eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); /* Clear a space in the inbox for eth header */ diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index b71eb39ab448..fbcb9e74d7fc 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1080,7 +1080,6 @@ static int ks8842_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(netdev->dev_addr, mac, netdev->addr_len); ks8842_write_mac_addr(adapter, mac); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 286816a4e783..33bcb63d56a2 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -69,7 +69,6 @@ union ks8851_tx_hdr { * @mii: The MII state information for the mii calls. * @rxctrl: RX settings for @rxctrl_work. * @tx_work: Work queue for tx packets - * @irq_work: Work queue for servicing interrupts * @rxctrl_work: Work queue for updating RX mode and multicast lists * @txq: Queue of packets for transmission. * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1. @@ -121,7 +120,6 @@ struct ks8851_net { struct ks8851_rxctrl rxctrl; struct work_struct tx_work; - struct work_struct irq_work; struct work_struct rxctrl_work; struct sk_buff_head txq; @@ -444,23 +442,6 @@ static void ks8851_init_mac(struct ks8851_net *ks) } /** - * ks8851_irq - device interrupt handler - * @irq: Interrupt number passed from the IRQ handler. - * @pw: The private word passed to register_irq(), our struct ks8851_net. - * - * Disable the interrupt from happening again until we've processed the - * current status by scheduling ks8851_irq_work(). 
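/*
 * Illustrative sketch, not taken from the patch: the ks8851 hunks below drop
 * the hard-IRQ handler plus schedule_work() bottom half in favour of a single
 * threaded handler.  request_threaded_irq() with a NULL primary handler and
 * IRQF_ONESHOT keeps the line masked until the thread function returns, so
 * the handler may sleep (for example across SPI transfers).  The
 * my_dev/my_handle_events names below are hypothetical.
 */
#include <linux/interrupt.h>

struct my_dev;
void my_handle_events(struct my_dev *md);	/* may sleep */

static irqreturn_t my_irq_thread(int irq, void *dev_id)
{
	struct my_dev *md = dev_id;

	my_handle_events(md);		/* runs in process context */
	return IRQ_HANDLED;
}

static int my_request_irq(struct my_dev *md, int irq)
{
	/* NULL primary handler: the core masks the line (IRQF_ONESHOT),
	 * wakes the thread, and unmasks the line when the thread returns. */
	return request_threaded_irq(irq, NULL, my_irq_thread,
				    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				    "my_dev", md);
}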
- */ -static irqreturn_t ks8851_irq(int irq, void *pw) -{ - struct ks8851_net *ks = pw; - - disable_irq_nosync(irq); - schedule_work(&ks->irq_work); - return IRQ_HANDLED; -} - -/** * ks8851_rdfifo - read data from the receive fifo * @ks: The device state. * @buff: The buffer address @@ -595,19 +576,20 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) } /** - * ks8851_irq_work - work queue handler for dealing with interrupt requests - * @work: The work structure that was scheduled by schedule_work() + * ks8851_irq - IRQ handler for dealing with interrupt requests + * @irq: IRQ number + * @_ks: cookie * - * This is the handler invoked when the ks8851_irq() is called to find out - * what happened, as we cannot allow ourselves to sleep whilst waiting for - * anything other process has the chip's lock. + * This handler is invoked when the IRQ line asserts to find out what happened. + * As we cannot allow ourselves to sleep in HARDIRQ context, this handler runs + * in thread context. * * Read the interrupt status, work out what needs to be done and then clear * any of the interrupts that are not needed. */ -static void ks8851_irq_work(struct work_struct *work) +static irqreturn_t ks8851_irq(int irq, void *_ks) { - struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work); + struct ks8851_net *ks = _ks; unsigned status; unsigned handled = 0; @@ -688,7 +670,7 @@ static void ks8851_irq_work(struct work_struct *work) if (status & IRQ_TXI) netif_wake_queue(ks->netdev); - enable_irq(ks->netdev->irq); + return IRQ_HANDLED; } /** @@ -896,7 +878,6 @@ static int ks8851_net_stop(struct net_device *dev) mutex_unlock(&ks->lock); /* stop any outstanding work */ - flush_work(&ks->irq_work); flush_work(&ks->tx_work); flush_work(&ks->rxctrl_work); @@ -1052,7 +1033,6 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; - dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); return ks8851_write_mac_addr(dev); } @@ -1438,7 +1418,6 @@ static int ks8851_probe(struct spi_device *spi) spin_lock_init(&ks->statelock); INIT_WORK(&ks->tx_work, ks8851_tx_work); - INIT_WORK(&ks->irq_work, ks8851_irq_work); INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work); /* initialise pre-made spi transfer messages */ @@ -1505,8 +1484,9 @@ static int ks8851_probe(struct spi_device *spi) ks8851_read_selftest(ks); ks8851_init_mac(ks); - ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW, - ndev->name, ks); + ret = request_threaded_irq(spi->irq, NULL, ks8851_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + ndev->name, ks); if (ret < 0) { dev_err(&spi->dev, "failed to get irq\n"); goto err_irq; diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index ef8f9f92e547..a343066f7b43 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1237,7 +1237,6 @@ static int ks_set_mac_address(struct net_device *netdev, void *paddr) struct sockaddr *addr = paddr; u8 *da; - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); da = (u8 *)netdev->dev_addr; diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index a99456c3dd87..5d98a9f7bfc7 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -527,7 +527,6 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr) if 
(!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, address->sa_data, dev->addr_len); return enc28j60_set_hw_macaddr(dev); } diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index f8408d6e961c..4f9937e026e5 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -664,10 +664,9 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) /* copy header of running firmware from SRAM to host memory to * validate firmware */ hdr = kmalloc(bytes, GFP_KERNEL); - if (hdr == NULL) { - dev_err(dev, "could not malloc firmware hdr\n"); + if (hdr == NULL) return -ENOMEM; - } + memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes); status = myri10ge_validate_firmware(mgp, hdr); kfree(hdr); diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c deleted file mode 100644 index 923e640d604c..000000000000 --- a/drivers/net/ethernet/natsemi/ibmlana.c +++ /dev/null @@ -1,1075 +0,0 @@ -/* -net-3-driver for the IBM LAN Adapter/A - -This is an extension to the Linux operating system, and is covered by the -same GNU General Public License that covers that work. - -Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de, - alfred.arnold@lancom.de) - -This driver is based both on the SK_MCA driver, which is itself based on the -SK_G16 and 3C523 driver. - -paper sources: - 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by - Hans-Peter Messmer for the basic Microchannel stuff - - 'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer - for help on Ethernet driver programming - - 'DP83934CVUL-20/25 MHz SONIC-T Ethernet Controller Datasheet' by National - Semiconductor for info on the MAC chip - - 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0 - Document Number SC30-3661-00' by IBM for info on the adapter itself - - Also see http://www.national.com/analog - -special acknowledgements to: - - Bob Eager for helping me out with documentation from IBM - - Jim Shorney for his endless patience with me while I was using - him as a beta tester to trace down the address filter bug ;-) - - Missing things: - - -> set debug level via ioctl instead of compile-time switches - -> I didn't follow the development of the 2.1.x kernels, so my - assumptions about which things changed with which kernel version - are probably nonsense - -History: - Nov 6th, 1999 - startup from SK_MCA driver - Dec 6th, 1999 - finally got docs about the card. A big thank you to Bob Eager! - Dec 12th, 1999 - first packet received - Dec 13th, 1999 - recv queue done, tcpdump works - Dec 15th, 1999 - transmission part works - Dec 28th, 1999 - added usage of the isa_functions for Linux 2.3 . Things should - still work with 2.0.x.... - Jan 28th, 2000 - in Linux 2.2.13, the version.h file mysteriously didn't get - included. Added a workaround for this. 
Furthermore, it now - not only compiles as a modules ;-) - Jan 30th, 2000 - newer kernels automatically probe more than one board, so the - 'startslot' as a variable is also needed here - Apr 12th, 2000 - the interrupt mask register is not set 'hard' instead of individually - setting registers, since this seems to set bits that shouldn't be - set - May 21st, 2000 - reset interrupt status immediately after CAM load - add a recovery delay after releasing the chip's reset line - May 24th, 2000 - finally found the bug in the address filter setup - damned signed - chars! - June 1st, 2000 - corrected version codes, added support for the latest 2.3 changes - Oct 28th, 2002 - cleaned up for the 2.5 tree <alan@lxorguk.ukuu.org.uk> - - *************************************************************************/ - -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/time.h> -#include <linux/mca.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/if_ether.h> -#include <linux/skbuff.h> -#include <linux/bitops.h> - -#include <asm/processor.h> -#include <asm/io.h> - -#define _IBM_LANA_DRIVER_ -#include "ibmlana.h" - -#undef DEBUG - -#define DRV_NAME "ibmlana" - -/* ------------------------------------------------------------------------ - * global static data - not more since we can handle multiple boards and - * have to pack all state info into the device struct! - * ------------------------------------------------------------------------ */ - -static char *MediaNames[Media_Count] = { - "10BaseT", "10Base5", "Unknown", "10Base2" -}; - -/* ------------------------------------------------------------------------ - * private subfunctions - * ------------------------------------------------------------------------ */ - -#ifdef DEBUG - /* dump all registers */ - -static void dumpregs(struct net_device *dev) -{ - int z; - - for (z = 0; z < 160; z += 2) { - if (!(z & 15)) - printk("REGS: %04x:", z); - printk(" %04x", inw(dev->base_addr + z)); - if ((z & 15) == 14) - printk("\n"); - } -} - -/* dump parts of shared memory - only needed during debugging */ - -static void dumpmem(struct net_device *dev, u32 start, u32 len) -{ - ibmlana_priv *priv = netdev_priv(dev); - int z; - - printk("Address %04x:\n", start); - for (z = 0; z < len; z++) { - if ((z & 15) == 0) - printk("%04x:", z); - printk(" %02x", readb(priv->base + start + z)); - if ((z & 15) == 15) - printk("\n"); - } - if ((z & 15) != 0) - printk("\n"); -} - -/* print exact time - ditto */ - -static void PrTime(void) -{ - struct timeval tv; - - do_gettimeofday(&tv); - printk("%9d:%06d: ", (int) tv.tv_sec, (int) tv.tv_usec); -} -#endif /* DEBUG */ - -/* deduce resources out of POS registers */ - -static void getaddrs(struct mca_device *mdev, int *base, int *memlen, - int *iobase, int *irq, ibmlana_medium *medium) -{ - u_char pos0, pos1; - - pos0 = mca_device_read_stored_pos(mdev, 2); - pos1 = mca_device_read_stored_pos(mdev, 3); - - *base = 0xc0000 + ((pos1 & 0xf0) << 9); - *memlen = (pos1 & 0x01) ? 
0x8000 : 0x4000; - *iobase = (pos0 & 0xe0) << 7; - switch (pos0 & 0x06) { - case 0: - *irq = 5; - break; - case 2: - *irq = 15; - break; - case 4: - *irq = 10; - break; - case 6: - *irq = 11; - break; - } - *medium = (pos0 & 0x18) >> 3; -} - -/* wait on register value with mask and timeout */ - -static int wait_timeout(struct net_device *dev, int regoffs, u16 mask, - u16 value, int timeout) -{ - unsigned long fin = jiffies + timeout; - - while (time_before(jiffies,fin)) - if ((inw(dev->base_addr + regoffs) & mask) == value) - return 1; - - return 0; -} - - -/* reset the whole board */ - -static void ResetBoard(struct net_device *dev) -{ - unsigned char bcmval; - - /* read original board control value */ - - bcmval = inb(dev->base_addr + BCMREG); - - /* set reset bit for a while */ - - bcmval |= BCMREG_RESET; - outb(bcmval, dev->base_addr + BCMREG); - udelay(10); - bcmval &= ~BCMREG_RESET; - outb(bcmval, dev->base_addr + BCMREG); - - /* switch over to RAM again */ - - bcmval |= BCMREG_RAMEN | BCMREG_RAMWIN; - outb(bcmval, dev->base_addr + BCMREG); -} - -/* calculate RAM layout & set up descriptors in RAM */ - -static void InitDscrs(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - u32 addr, baddr, raddr; - int z; - tda_t tda; - rda_t rda; - rra_t rra; - - /* initialize RAM */ - - memset_io(priv->base, 0xaa, - dev->mem_start - dev->mem_start); /* XXX: typo? */ - - /* setup n TX descriptors - independent of RAM size */ - - priv->tdastart = addr = 0; - priv->txbufstart = baddr = sizeof(tda_t) * TXBUFCNT; - for (z = 0; z < TXBUFCNT; z++) { - tda.status = 0; - tda.config = 0; - tda.length = 0; - tda.fragcount = 1; - tda.startlo = baddr; - tda.starthi = 0; - tda.fraglength = 0; - if (z == TXBUFCNT - 1) - tda.link = priv->tdastart; - else - tda.link = addr + sizeof(tda_t); - tda.link |= 1; - memcpy_toio(priv->base + addr, &tda, sizeof(tda_t)); - addr += sizeof(tda_t); - baddr += PKTSIZE; - } - - /* calculate how many receive buffers fit into remaining memory */ - - priv->rxbufcnt = (dev->mem_end - dev->mem_start - baddr) / (sizeof(rra_t) + sizeof(rda_t) + PKTSIZE); - - /* calculate receive addresses */ - - priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE); - priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)); - priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t)); - - for (z = 0; z < priv->rxbufcnt; z++) { - rra.startlo = baddr; - rra.starthi = 0; - rra.cntlo = PKTSIZE >> 1; - rra.cnthi = 0; - memcpy_toio(priv->base + raddr, &rra, sizeof(rra_t)); - - rda.status = 0; - rda.length = 0; - rda.startlo = 0; - rda.starthi = 0; - rda.seqno = 0; - if (z < priv->rxbufcnt - 1) - rda.link = addr + sizeof(rda_t); - else - rda.link = 1; - rda.inuse = 1; - memcpy_toio(priv->base + addr, &rda, sizeof(rda_t)); - - baddr += PKTSIZE; - raddr += sizeof(rra_t); - addr += sizeof(rda_t); - } - - /* initialize current pointers */ - - priv->nextrxdescr = 0; - priv->lastrxdescr = priv->rxbufcnt - 1; - priv->nexttxdescr = 0; - priv->currtxdescr = 0; - priv->txusedcnt = 0; - memset(priv->txused, 0, sizeof(priv->txused)); -} - -/* set up Rx + Tx descriptors in SONIC */ - -static int InitSONIC(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - - /* set up start & end of resource area */ - - outw(0, SONIC_URRA); - outw(priv->rrastart, dev->base_addr + SONIC_RSA); - outw(priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)), dev->base_addr + SONIC_REA); - outw(priv->rrastart, dev->base_addr + SONIC_RRP); - outw(priv->rrastart, 
dev->base_addr + SONIC_RWP); - - /* set EOBC so that only one packet goes into one buffer */ - - outw((PKTSIZE - 4) >> 1, dev->base_addr + SONIC_EOBC); - - /* let SONIC read the first RRA descriptor */ - - outw(CMDREG_RRRA, dev->base_addr + SONIC_CMDREG); - if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_RRRA, 0, 2)) { - printk(KERN_ERR "%s: SONIC did not respond on RRRA command - giving up.", dev->name); - return 0; - } - - /* point SONIC to the first RDA */ - - outw(0, dev->base_addr + SONIC_URDA); - outw(priv->rdastart, dev->base_addr + SONIC_CRDA); - - /* set upper half of TDA address */ - - outw(0, dev->base_addr + SONIC_UTDA); - - return 1; -} - -/* stop SONIC so we can reinitialize it */ - -static void StopSONIC(struct net_device *dev) -{ - /* disable interrupts */ - - outb(inb(dev->base_addr + BCMREG) & (~BCMREG_IEN), dev->base_addr + BCMREG); - outb(0, dev->base_addr + SONIC_IMREG); - - /* reset the SONIC */ - - outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG); - udelay(10); - outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG); -} - -/* initialize card and SONIC for proper operation */ - -static void putcam(camentry_t * cams, int *camcnt, char *addr) -{ - camentry_t *pcam = cams + (*camcnt); - u8 *uaddr = (u8 *) addr; - - pcam->index = *camcnt; - pcam->addr0 = (((u16) uaddr[1]) << 8) | uaddr[0]; - pcam->addr1 = (((u16) uaddr[3]) << 8) | uaddr[2]; - pcam->addr2 = (((u16) uaddr[5]) << 8) | uaddr[4]; - (*camcnt)++; -} - -static void InitBoard(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - int camcnt; - camentry_t cams[16]; - u32 cammask; - struct netdev_hw_addr *ha; - u16 rcrval; - - /* reset the SONIC */ - - outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG); - udelay(10); - - /* clear all spurious interrupts */ - - outw(inw(dev->base_addr + SONIC_ISREG), dev->base_addr + SONIC_ISREG); - - /* set up the SONIC's bus interface - constant for this adapter - - must be done while the SONIC is in reset */ - - outw(DCREG_USR1 | DCREG_USR0 | DCREG_WC1 | DCREG_DW32, dev->base_addr + SONIC_DCREG); - outw(0, dev->base_addr + SONIC_DCREG2); - - /* remove reset form the SONIC */ - - outw(0, dev->base_addr + SONIC_CMDREG); - udelay(10); - - /* data sheet requires URRA to be programmed before setting up the CAM contents */ - - outw(0, dev->base_addr + SONIC_URRA); - - /* program the CAM entry 0 to the device address */ - - camcnt = 0; - putcam(cams, &camcnt, dev->dev_addr); - - /* start putting the multicast addresses into the CAM list. Stop if - it is full. 
*/ - - netdev_for_each_mc_addr(ha, dev) { - putcam(cams, &camcnt, ha->addr); - if (camcnt == 16) - break; - } - - /* calculate CAM mask */ - - cammask = (1 << camcnt) - 1; - - /* feed CDA into SONIC, initialize RCR value (always get broadcasts) */ - - memcpy_toio(priv->base, cams, sizeof(camentry_t) * camcnt); - memcpy_toio(priv->base + (sizeof(camentry_t) * camcnt), &cammask, sizeof(cammask)); - -#ifdef DEBUG - printk("CAM setup:\n"); - dumpmem(dev, 0, sizeof(camentry_t) * camcnt + sizeof(cammask)); -#endif - - outw(0, dev->base_addr + SONIC_CAMPTR); - outw(camcnt, dev->base_addr + SONIC_CAMCNT); - outw(CMDREG_LCAM, dev->base_addr + SONIC_CMDREG); - if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_LCAM, 0, 2)) { - printk(KERN_ERR "%s:SONIC did not respond on LCAM command - giving up.", dev->name); - return; - } else { - /* clear interrupt condition */ - - outw(ISREG_LCD, dev->base_addr + SONIC_ISREG); - -#ifdef DEBUG - printk("Loading CAM done, address pointers %04x:%04x\n", - inw(dev->base_addr + SONIC_URRA), - inw(dev->base_addr + SONIC_CAMPTR)); - { - int z; - - printk("\n-->CAM: PTR %04x CNT %04x\n", - inw(dev->base_addr + SONIC_CAMPTR), - inw(dev->base_addr + SONIC_CAMCNT)); - outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG); - for (z = 0; z < camcnt; z++) { - outw(z, dev->base_addr + SONIC_CAMEPTR); - printk("Entry %d: %04x %04x %04x\n", z, - inw(dev->base_addr + SONIC_CAMADDR0), - inw(dev->base_addr + SONIC_CAMADDR1), - inw(dev->base_addr + SONIC_CAMADDR2)); - } - outw(0, dev->base_addr + SONIC_CMDREG); - } -#endif - } - - rcrval = RCREG_BRD | RCREG_LB_NONE; - - /* if still multicast addresses left or ALLMULTI is set, set the multicast - enable bit */ - - if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt) - rcrval |= RCREG_AMC; - - /* promiscuous mode ? 
*/ - - if (dev->flags & IFF_PROMISC) - rcrval |= RCREG_PRO; - - /* program receive mode */ - - outw(rcrval, dev->base_addr + SONIC_RCREG); -#ifdef DEBUG - printk("\nRCRVAL: %04x\n", rcrval); -#endif - - /* set up descriptors in shared memory + feed them into SONIC registers */ - - InitDscrs(dev); - if (!InitSONIC(dev)) - return; - - /* reset all pending interrupts */ - - outw(0xffff, dev->base_addr + SONIC_ISREG); - - /* enable transmitter + receiver interrupts */ - - outw(CMDREG_RXEN, dev->base_addr + SONIC_CMDREG); - outw(IMREG_PRXEN | IMREG_RBEEN | IMREG_PTXEN | IMREG_TXEREN, dev->base_addr + SONIC_IMREG); - - /* turn on card interrupts */ - - outb(inb(dev->base_addr + BCMREG) | BCMREG_IEN, dev->base_addr + BCMREG); - -#ifdef DEBUG - printk("Register dump after initialization:\n"); - dumpregs(dev); -#endif -} - -/* start transmission of a descriptor */ - -static void StartTx(struct net_device *dev, int descr) -{ - ibmlana_priv *priv = netdev_priv(dev); - int addr; - - addr = priv->tdastart + (descr * sizeof(tda_t)); - - /* put descriptor address into SONIC */ - - outw(addr, dev->base_addr + SONIC_CTDA); - - /* trigger transmitter */ - - priv->currtxdescr = descr; - outw(CMDREG_TXP, dev->base_addr + SONIC_CMDREG); -} - -/* ------------------------------------------------------------------------ - * interrupt handler(s) - * ------------------------------------------------------------------------ */ - -/* receive buffer area exhausted */ - -static void irqrbe_handler(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - - /* point the SONIC back to the RRA start */ - - outw(priv->rrastart, dev->base_addr + SONIC_RRP); - outw(priv->rrastart, dev->base_addr + SONIC_RWP); -} - -/* receive interrupt */ - -static void irqrx_handler(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - rda_t rda; - u32 rdaaddr, lrdaaddr; - - /* loop until ... */ - - while (1) { - /* read descriptor that was next to be filled by SONIC */ - - rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t)); - lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t)); - memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t)); - - /* iron out upper word halves of fields we use - SONIC will duplicate - bits 0..15 to 16..31 */ - - rda.status &= 0xffff; - rda.length &= 0xffff; - rda.startlo &= 0xffff; - - /* stop if the SONIC still owns it, i.e. there is no data for us */ - - if (rda.inuse) - break; - - /* good packet? */ - - else if (rda.status & RCREG_PRX) { - struct sk_buff *skb; - - /* fetch buffer */ - - skb = netdev_alloc_skb(dev, rda.length + 2); - if (skb == NULL) - dev->stats.rx_dropped++; - else { - /* copy out data */ - - memcpy_fromio(skb_put(skb, rda.length), - priv->base + - rda.startlo, rda.length); - - /* set up skb fields */ - - skb->protocol = eth_type_trans(skb, dev); - skb_checksum_none_assert(skb); - - /* bookkeeping */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += rda.length; - - /* pass to the upper layers */ - netif_rx(skb); - } - } - - /* otherwise check error status bits and increase statistics */ - - else { - dev->stats.rx_errors++; - if (rda.status & RCREG_FAER) - dev->stats.rx_frame_errors++; - if (rda.status & RCREG_CRCR) - dev->stats.rx_crc_errors++; - } - - /* descriptor processed, will become new last descriptor in queue */ - - rda.link = 1; - rda.inuse = 1; - memcpy_toio(priv->base + rdaaddr, &rda, - sizeof(rda_t)); - - /* set up link and EOL = 0 in currently last descriptor. 
Only write - the link field since the SONIC may currently already access the - other fields. */ - - memcpy_toio(priv->base + lrdaaddr + 20, &rdaaddr, 4); - - /* advance indices */ - - priv->lastrxdescr = priv->nextrxdescr; - if ((++priv->nextrxdescr) >= priv->rxbufcnt) - priv->nextrxdescr = 0; - } -} - -/* transmit interrupt */ - -static void irqtx_handler(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - tda_t tda; - - /* fetch descriptor (we forgot the size ;-) */ - memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); - - /* update statistics */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += tda.length; - - /* update our pointers */ - priv->txused[priv->currtxdescr] = 0; - priv->txusedcnt--; - - /* if there are more descriptors present in RAM, start them */ - if (priv->txusedcnt > 0) - StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT); - - /* tell the upper layer we can go on transmitting */ - netif_wake_queue(dev); -} - -static void irqtxerr_handler(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - tda_t tda; - - /* fetch descriptor to check status */ - memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); - - /* update statistics */ - dev->stats.tx_errors++; - if (tda.status & (TCREG_NCRS | TCREG_CRSL)) - dev->stats.tx_carrier_errors++; - if (tda.status & TCREG_EXC) - dev->stats.tx_aborted_errors++; - if (tda.status & TCREG_OWC) - dev->stats.tx_window_errors++; - if (tda.status & TCREG_FU) - dev->stats.tx_fifo_errors++; - - /* update our pointers */ - priv->txused[priv->currtxdescr] = 0; - priv->txusedcnt--; - - /* if there are more descriptors present in RAM, start them */ - if (priv->txusedcnt > 0) - StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT); - - /* tell the upper layer we can go on transmitting */ - netif_wake_queue(dev); -} - -/* general interrupt entry */ - -static irqreturn_t irq_handler(int dummy, void *device) -{ - struct net_device *dev = device; - u16 ival; - - /* in case we're not meant... */ - if (!(inb(dev->base_addr + BCMREG) & BCMREG_IPEND)) - return IRQ_NONE; - - /* loop through the interrupt bits until everything is clear */ - while (1) { - ival = inw(dev->base_addr + SONIC_ISREG); - - if (ival & ISREG_RBE) { - irqrbe_handler(dev); - outw(ISREG_RBE, dev->base_addr + SONIC_ISREG); - } - if (ival & ISREG_PKTRX) { - irqrx_handler(dev); - outw(ISREG_PKTRX, dev->base_addr + SONIC_ISREG); - } - if (ival & ISREG_TXDN) { - irqtx_handler(dev); - outw(ISREG_TXDN, dev->base_addr + SONIC_ISREG); - } - if (ival & ISREG_TXER) { - irqtxerr_handler(dev); - outw(ISREG_TXER, dev->base_addr + SONIC_ISREG); - } - break; - } - return IRQ_HANDLED; -} - -/* ------------------------------------------------------------------------ - * driver methods - * ------------------------------------------------------------------------ */ - -/* MCA info */ - -#if 0 /* info available elsewhere, but this is kept for reference */ -static int ibmlana_getinfo(char *buf, int slot, void *d) -{ - int len = 0, i; - struct net_device *dev = (struct net_device *) d; - ibmlana_priv *priv; - - /* can't say anything about an uninitialized device... 
*/ - - if (dev == NULL) - return len; - priv = netdev_priv(dev); - - /* print info */ - - len += sprintf(buf + len, "IRQ: %d\n", priv->realirq); - len += sprintf(buf + len, "I/O: %#lx\n", dev->base_addr); - len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, dev->mem_end - 1); - len += sprintf(buf + len, "Transceiver: %s\n", MediaNames[priv->medium]); - len += sprintf(buf + len, "Device: %s\n", dev->name); - len += sprintf(buf + len, "MAC address:"); - for (i = 0; i < 6; i++) - len += sprintf(buf + len, " %02x", dev->dev_addr[i]); - buf[len++] = '\n'; - buf[len] = 0; - - return len; -} -#endif - -/* open driver. Means also initialization and start of LANCE */ - -static int ibmlana_open(struct net_device *dev) -{ - int result; - ibmlana_priv *priv = netdev_priv(dev); - - /* register resources - only necessary for IRQ */ - - result = request_irq(priv->realirq, irq_handler, IRQF_SHARED, - dev->name, dev); - if (result != 0) { - printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq); - return result; - } - dev->irq = priv->realirq; - - /* set up the card and SONIC */ - InitBoard(dev); - - /* initialize operational flags */ - netif_start_queue(dev); - return 0; -} - -/* close driver. Shut down board and free allocated resources */ - -static int ibmlana_close(struct net_device *dev) -{ - /* turn off board */ - - /* release resources */ - if (dev->irq != 0) - free_irq(dev->irq, dev); - dev->irq = 0; - return 0; -} - -/* transmit a block. */ - -static netdev_tx_t ibmlana_tx(struct sk_buff *skb, struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - int tmplen, addr; - unsigned long flags; - tda_t tda; - int baddr; - - /* find out if there are free slots for a frame to transmit. If not, - the upper layer is in deep desperation and we simply ignore the frame. */ - - if (priv->txusedcnt >= TXBUFCNT) { - dev->stats.tx_dropped++; - goto tx_done; - } - - /* copy the frame data into the next free transmit buffer - fillup missing */ - tmplen = skb->len; - if (tmplen < 60) - tmplen = 60; - baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE); - memcpy_toio(priv->base + baddr, skb->data, skb->len); - - /* copy filler into RAM - in case we're filling up... - we're filling a bit more than necessary, but that doesn't harm - since the buffer is far larger... - Sorry Linus for the filler string but I couldn't resist ;-) */ - - if (tmplen > skb->len) { - char *fill = "NetBSD is a nice OS too! "; - unsigned int destoffs = skb->len, l = strlen(fill); - - while (destoffs < tmplen) { - memcpy_toio(priv->base + baddr + destoffs, fill, l); - destoffs += l; - } - } - - /* set up the new frame descriptor */ - addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t)); - memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t)); - tda.length = tda.fraglength = tmplen; - memcpy_toio(priv->base + addr, &tda, sizeof(tda_t)); - - /* if there were no active descriptors, trigger the SONIC */ - spin_lock_irqsave(&priv->lock, flags); - - priv->txusedcnt++; - priv->txused[priv->nexttxdescr] = 1; - - /* are all transmission slots used up ? */ - if (priv->txusedcnt >= TXBUFCNT) - netif_stop_queue(dev); - - if (priv->txusedcnt == 1) - StartTx(dev, priv->nexttxdescr); - priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT; - - spin_unlock_irqrestore(&priv->lock, flags); -tx_done: - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* switch receiver mode. */ - -static void ibmlana_set_multicast_list(struct net_device *dev) -{ - /* first stop the SONIC... 
*/ - StopSONIC(dev); - /* ...then reinit it with the new flags */ - InitBoard(dev); -} - -/* ------------------------------------------------------------------------ - * hardware check - * ------------------------------------------------------------------------ */ - -static int ibmlana_irq; -static int ibmlana_io; -static int startslot; /* counts through slots when probing multiple devices */ - -static short ibmlana_adapter_ids[] __initdata = { - IBM_LANA_ID, - 0x0000 -}; - -static char *ibmlana_adapter_names[] = { - "IBM LAN Adapter/A", - NULL -}; - - -static const struct net_device_ops ibmlana_netdev_ops = { - .ndo_open = ibmlana_open, - .ndo_stop = ibmlana_close, - .ndo_start_xmit = ibmlana_tx, - .ndo_set_rx_mode = ibmlana_set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int ibmlana_init_one(struct device *kdev) -{ - struct mca_device *mdev = to_mca_device(kdev); - struct net_device *dev; - int slot = mdev->slot, z, rc; - int base = 0, irq = 0, iobase = 0, memlen = 0; - ibmlana_priv *priv; - ibmlana_medium medium; - - dev = alloc_etherdev(sizeof(ibmlana_priv)); - if (!dev) - return -ENOMEM; - - dev->irq = ibmlana_irq; - dev->base_addr = ibmlana_io; - - base = dev->mem_start; - irq = dev->irq; - - /* deduce card addresses */ - getaddrs(mdev, &base, &memlen, &iobase, &irq, &medium); - - /* were we looking for something different ? */ - if (dev->irq && dev->irq != irq) { - rc = -ENODEV; - goto err_out; - } - if (dev->mem_start && dev->mem_start != base) { - rc = -ENODEV; - goto err_out; - } - - /* announce success */ - printk(KERN_INFO "%s: IBM LAN Adapter/A found in slot %d\n", dev->name, slot + 1); - - /* try to obtain I/O range */ - if (!request_region(iobase, IBM_LANA_IORANGE, DRV_NAME)) { - printk(KERN_ERR "%s: cannot allocate I/O range at %#x!\n", DRV_NAME, iobase); - startslot = slot + 1; - rc = -EBUSY; - goto err_out; - } - - priv = netdev_priv(dev); - priv->slot = slot; - priv->realirq = mca_device_transform_irq(mdev, irq); - priv->medium = medium; - spin_lock_init(&priv->lock); - - /* set base + irq for this device (irq not allocated so far) */ - - dev->irq = 0; - dev->mem_start = base; - dev->mem_end = base + memlen; - dev->base_addr = iobase; - - priv->base = ioremap(base, memlen); - if (!priv->base) { - printk(KERN_ERR "%s: cannot remap memory!\n", DRV_NAME); - startslot = slot + 1; - rc = -EBUSY; - goto err_out_reg; - } - - mca_device_set_name(mdev, ibmlana_adapter_names[mdev->index]); - mca_device_set_claim(mdev, 1); - - /* set methods */ - dev->netdev_ops = &ibmlana_netdev_ops; - dev->flags |= IFF_MULTICAST; - - /* copy out MAC address */ - - for (z = 0; z < ETH_ALEN; z++) - dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z); - - /* print config */ - - printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, " - "MAC address %pM.\n", - dev->name, priv->realirq, dev->base_addr, - dev->mem_start, dev->mem_end - 1, - dev->dev_addr); - printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]); - - /* reset board */ - - ResetBoard(dev); - - /* next probe will start at next slot */ - - startslot = slot + 1; - - rc = register_netdev(dev); - if (rc) - goto err_out_claimed; - - dev_set_drvdata(kdev, dev); - return 0; - -err_out_claimed: - mca_device_set_claim(mdev, 0); - iounmap(priv->base); -err_out_reg: - release_region(iobase, IBM_LANA_IORANGE); -err_out: - free_netdev(dev); - return rc; -} - -static int ibmlana_remove_one(struct device *kdev) -{ - struct 
mca_device *mdev = to_mca_device(kdev); - struct net_device *dev = dev_get_drvdata(kdev); - ibmlana_priv *priv = netdev_priv(dev); - - unregister_netdev(dev); - /*DeinitBoard(dev); */ - release_region(dev->base_addr, IBM_LANA_IORANGE); - mca_device_set_claim(mdev, 0); - iounmap(priv->base); - free_netdev(dev); - return 0; -} - -/* ------------------------------------------------------------------------ - * modularization support - * ------------------------------------------------------------------------ */ - -module_param_named(irq, ibmlana_irq, int, 0); -module_param_named(io, ibmlana_io, int, 0); -MODULE_PARM_DESC(irq, "IBM LAN/A IRQ number"); -MODULE_PARM_DESC(io, "IBM LAN/A I/O base address"); -MODULE_LICENSE("GPL"); - -static struct mca_driver ibmlana_driver = { - .id_table = ibmlana_adapter_ids, - .driver = { - .name = "ibmlana", - .bus = &mca_bus_type, - .probe = ibmlana_init_one, - .remove = ibmlana_remove_one, - }, -}; - -static int __init ibmlana_init_module(void) -{ - return mca_register_driver(&ibmlana_driver); -} - -static void __exit ibmlana_cleanup_module(void) -{ - mca_unregister_driver(&ibmlana_driver); -} - -module_init(ibmlana_init_module); -module_exit(ibmlana_cleanup_module); diff --git a/drivers/net/ethernet/natsemi/ibmlana.h b/drivers/net/ethernet/natsemi/ibmlana.h deleted file mode 100644 index accd5efc9c8a..000000000000 --- a/drivers/net/ethernet/natsemi/ibmlana.h +++ /dev/null @@ -1,278 +0,0 @@ -#ifndef _IBM_LANA_INCLUDE_ -#define _IBM_LANA_INCLUDE_ - -#ifdef _IBM_LANA_DRIVER_ - -/* maximum packet size */ - -#define PKTSIZE 1524 - -/* number of transmit buffers */ - -#define TXBUFCNT 4 - -/* Adapter ID's */ -#define IBM_LANA_ID 0xffe0 - -/* media enumeration - defined in a way that it fits onto the LAN/A's - POS registers... */ - -typedef enum { - Media_10BaseT, Media_10Base5, - Media_Unknown, Media_10Base2, Media_Count -} ibmlana_medium; - -/* private structure */ - -typedef struct { - unsigned int slot; /* MCA-Slot-# */ - int realirq; /* memorizes actual IRQ, even when - currently not allocated */ - ibmlana_medium medium; /* physical cannector */ - u32 tdastart, txbufstart, /* addresses */ - rrastart, rxbufstart, rdastart, rxbufcnt, txusedcnt; - int nextrxdescr, /* next rx descriptor to be used */ - lastrxdescr, /* last free rx descriptor */ - nexttxdescr, /* last tx descriptor to be used */ - currtxdescr, /* tx descriptor currently tx'ed */ - txused[TXBUFCNT]; /* busy flags */ - void __iomem *base; - spinlock_t lock; -} ibmlana_priv; - -/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes - a full 64K I/O range... 
*/ - -#define IBM_LANA_IORANGE 0xa0 - -/* Command Register: */ - -#define SONIC_CMDREG 0x00 -#define CMDREG_HTX 0x0001 /* halt transmission */ -#define CMDREG_TXP 0x0002 /* start transmission */ -#define CMDREG_RXDIS 0x0004 /* disable receiver */ -#define CMDREG_RXEN 0x0008 /* enable receiver */ -#define CMDREG_STP 0x0010 /* stop timer */ -#define CMDREG_ST 0x0020 /* start timer */ -#define CMDREG_RST 0x0080 /* software reset */ -#define CMDREG_RRRA 0x0100 /* force SONIC to read first RRA */ -#define CMDREG_LCAM 0x0200 /* force SONIC to read CAM descrs */ - -/* Data Configuration Register */ - -#define SONIC_DCREG 0x02 -#define DCREG_EXBUS 0x8000 /* Extended Bus Mode */ -#define DCREG_LBR 0x2000 /* Latched Bus Retry */ -#define DCREG_PO1 0x1000 /* Programmable Outputs */ -#define DCREG_PO0 0x0800 -#define DCREG_SBUS 0x0400 /* Synchronous Bus Mode */ -#define DCREG_USR1 0x0200 /* User Definable Pins */ -#define DCREG_USR0 0x0100 -#define DCREG_WC0 0x0000 /* 0..3 Wait States */ -#define DCREG_WC1 0x0040 -#define DCREG_WC2 0x0080 -#define DCREG_WC3 0x00c0 -#define DCREG_DW16 0x0000 /* 16 bit Bus Mode */ -#define DCREG_DW32 0x0020 /* 32 bit Bus Mode */ -#define DCREG_BMS 0x0010 /* Block Mode Select */ -#define DCREG_RFT4 0x0000 /* 4/8/16/24 bytes RX Threshold */ -#define DCREG_RFT8 0x0004 -#define DCREG_RFT16 0x0008 -#define DCREG_RFT24 0x000c -#define DCREG_TFT8 0x0000 /* 8/16/24/28 bytes TX Threshold */ -#define DCREG_TFT16 0x0001 -#define DCREG_TFT24 0x0002 -#define DCREG_TFT28 0x0003 - -/* Receive Control Register */ - -#define SONIC_RCREG 0x04 -#define RCREG_ERR 0x8000 /* accept damaged and collided pkts */ -#define RCREG_RNT 0x4000 /* accept packets that are < 64 */ -#define RCREG_BRD 0x2000 /* accept broadcasts */ -#define RCREG_PRO 0x1000 /* promiscuous mode */ -#define RCREG_AMC 0x0800 /* accept all multicasts */ -#define RCREG_LB_NONE 0x0000 /* no loopback */ -#define RCREG_LB_MAC 0x0200 /* MAC loopback */ -#define RCREG_LB_ENDEC 0x0400 /* ENDEC loopback */ -#define RCREG_LB_XVR 0x0600 /* Transceiver loopback */ -#define RCREG_MC 0x0100 /* Multicast received */ -#define RCREG_BC 0x0080 /* Broadcast received */ -#define RCREG_LPKT 0x0040 /* last packet in RBA */ -#define RCREG_CRS 0x0020 /* carrier sense present */ -#define RCREG_COL 0x0010 /* recv'd packet with collision */ -#define RCREG_CRCR 0x0008 /* recv'd packet with CRC error */ -#define RCREG_FAER 0x0004 /* recv'd packet with inv. 
framing */ -#define RCREG_LBK 0x0002 /* recv'd loopback packet */ -#define RCREG_PRX 0x0001 /* recv'd packet is OK */ - -/* Transmit Control Register */ - -#define SONIC_TCREG 0x06 -#define TCREG_PINT 0x8000 /* generate interrupt after TDA read */ -#define TCREG_POWC 0x4000 /* timer start out of window detect */ -#define TCREG_CRCI 0x2000 /* inhibit CRC generation */ -#define TCREG_EXDIS 0x1000 /* disable excessive deferral timer */ -#define TCREG_EXD 0x0400 /* excessive deferral occurred */ -#define TCREG_DEF 0x0200 /* single deferral occurred */ -#define TCREG_NCRS 0x0100 /* no carrier detected */ -#define TCREG_CRSL 0x0080 /* carrier lost */ -#define TCREG_EXC 0x0040 /* excessive collisions occurred */ -#define TCREG_OWC 0x0020 /* out of window collision occurred */ -#define TCREG_PMB 0x0008 /* packet monitored bad */ -#define TCREG_FU 0x0004 /* FIFO underrun */ -#define TCREG_BCM 0x0002 /* byte count mismatch of fragments */ -#define TCREG_PTX 0x0001 /* packet transmitted OK */ - -/* Interrupt Mask Register */ - -#define SONIC_IMREG 0x08 -#define IMREG_BREN 0x4000 /* interrupt when bus retry occurred */ -#define IMREG_HBLEN 0x2000 /* interrupt when heartbeat lost */ -#define IMREG_LCDEN 0x1000 /* interrupt when CAM loaded */ -#define IMREG_PINTEN 0x0800 /* interrupt when PINT in TDA set */ -#define IMREG_PRXEN 0x0400 /* interrupt when packet received */ -#define IMREG_PTXEN 0x0200 /* interrupt when packet was sent */ -#define IMREG_TXEREN 0x0100 /* interrupt when send failed */ -#define IMREG_TCEN 0x0080 /* interrupt when timer completed */ -#define IMREG_RDEEN 0x0040 /* interrupt when RDA exhausted */ -#define IMREG_RBEEN 0x0020 /* interrupt when RBA exhausted */ -#define IMREG_RBAEEN 0x0010 /* interrupt when RBA too short */ -#define IMREG_CRCEN 0x0008 /* interrupt when CRC counter rolls */ -#define IMREG_FAEEN 0x0004 /* interrupt when FAE counter rolls */ -#define IMREG_MPEN 0x0002 /* interrupt when MP counter rolls */ -#define IMREG_RFOEN 0x0001 /* interrupt when Rx FIFO overflows */ - -/* Interrupt Status Register */ - -#define SONIC_ISREG 0x0a -#define ISREG_BR 0x4000 /* bus retry occurred */ -#define ISREG_HBL 0x2000 /* heartbeat lost */ -#define ISREG_LCD 0x1000 /* CAM loaded */ -#define ISREG_PINT 0x0800 /* PINT in TDA set */ -#define ISREG_PKTRX 0x0400 /* packet received */ -#define ISREG_TXDN 0x0200 /* packet was sent */ -#define ISREG_TXER 0x0100 /* send failed */ -#define ISREG_TC 0x0080 /* timer completed */ -#define ISREG_RDE 0x0040 /* RDA exhausted */ -#define ISREG_RBE 0x0020 /* RBA exhausted */ -#define ISREG_RBAE 0x0010 /* RBA too short for received frame */ -#define ISREG_CRC 0x0008 /* CRC counter rolls over */ -#define ISREG_FAE 0x0004 /* FAE counter rolls over */ -#define ISREG_MP 0x0002 /* MP counter rolls over */ -#define ISREG_RFO 0x0001 /* Rx FIFO overflows */ - -#define SONIC_UTDA 0x0c /* current transmit descr address */ -#define SONIC_CTDA 0x0e - -#define SONIC_URDA 0x1a /* current receive descr address */ -#define SONIC_CRDA 0x1c - -#define SONIC_CRBA0 0x1e /* current receive buffer address */ -#define SONIC_CRBA1 0x20 - -#define SONIC_RBWC0 0x22 /* word count in receive buffer */ -#define SONIC_RBWC1 0x24 - -#define SONIC_EOBC 0x26 /* minimum space to be free in RBA */ - -#define SONIC_URRA 0x28 /* upper address of CDA & Recv Area */ - -#define SONIC_RSA 0x2a /* start of receive resource area */ - -#define SONIC_REA 0x2c /* end of receive resource area */ - -#define SONIC_RRP 0x2e /* resource read pointer */ - -#define SONIC_RWP 0x30 /* resource write 
pointer */ - -#define SONIC_CAMEPTR 0x42 /* CAM entry pointer */ - -#define SONIC_CAMADDR2 0x44 /* CAM address ports */ -#define SONIC_CAMADDR1 0x46 -#define SONIC_CAMADDR0 0x48 - -#define SONIC_CAMPTR 0x4c /* lower address of CDA */ - -#define SONIC_CAMCNT 0x4e /* # of CAM descriptors to load */ - -/* Data Configuration Register 2 */ - -#define SONIC_DCREG2 0x7e -#define DCREG2_EXPO3 0x8000 /* extended programmable outputs */ -#define DCREG2_EXPO2 0x4000 -#define DCREG2_EXPO1 0x2000 -#define DCREG2_EXPO0 0x1000 -#define DCREG2_HD 0x0800 /* heartbeat disable */ -#define DCREG2_JD 0x0200 /* jabber timer disable */ -#define DCREG2_AUTO 0x0100 /* enable AUI/TP auto selection */ -#define DCREG2_XWRAP 0x0040 /* TP transceiver loopback */ -#define DCREG2_PH 0x0010 /* HOLD request timing */ -#define DCREG2_PCM 0x0004 /* packet compress when matched */ -#define DCREG2_PCNM 0x0002 /* packet compress when not matched */ -#define DCREG2_RJCM 0x0001 /* inverse packet match via CAM */ - -/* Board Control Register: Enable RAM, Interrupts... */ - -#define BCMREG 0x80 -#define BCMREG_RAMEN 0x80 /* switch over to RAM */ -#define BCMREG_IPEND 0x40 /* interrupt pending ? */ -#define BCMREG_RESET 0x08 /* reset board */ -#define BCMREG_16BIT 0x04 /* adapter in 16-bit slot */ -#define BCMREG_RAMWIN 0x02 /* enable RAM window */ -#define BCMREG_IEN 0x01 /* interrupt enable */ - -/* MAC Address PROM */ - -#define MACADDRPROM 0x92 - -/* structure of a CAM entry */ - -typedef struct { - u32 index; /* pointer into CAM area */ - u32 addr0; /* address part (bits 0..15 used) */ - u32 addr1; - u32 addr2; -} camentry_t; - -/* structure of a receive resource */ - -typedef struct { - u32 startlo; /* start address (bits 0..15 used) */ - u32 starthi; - u32 cntlo; /* size in 16-bit quantities */ - u32 cnthi; -} rra_t; - -/* structure of a receive descriptor */ - -typedef struct { - u32 status; /* packet status */ - u32 length; /* length in bytes */ - u32 startlo; /* start address */ - u32 starthi; - u32 seqno; /* frame sequence */ - u32 link; /* pointer to next descriptor */ - /* bit 0 = EOL */ - u32 inuse; /* !=0 --> free for SONIC to write */ -} rda_t; - -/* structure of a transmit descriptor */ - -typedef struct { - u32 status; /* transmit status */ - u32 config; /* value for TCR */ - u32 length; /* total length */ - u32 fragcount; /* number of fragments */ - u32 startlo; /* start address of fragment */ - u32 starthi; - u32 fraglength; /* length of this fragment */ - /* more address/length triplets may */ - /* follow here */ - u32 link; /* pointer to next descriptor */ - /* bit 0 = EOL */ -} tda_t; - -#endif /* _IBM_LANA_DRIVER_ */ - -#endif /* _IBM_LANA_INCLUDE_ */ diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index f4ad60c97eae..7a5e295588b0 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -862,9 +862,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) prev_eedata = eedata; } - /* Store MAC Address in perm_addr */ - memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); - np = netdev_priv(dev); np->ioaddr = ioaddr; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 7c94c089212f..bfd887382e19 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8014,7 +8014,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Set the factory defined MAC address initially */ dev->addr_len = ETH_ALEN; 
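/*
 * Illustrative sketch, not taken from the patch: the get_drvinfo() hunks that
 * follow (vxge, w90p910, lpc_eth, octeon_mgmt, hamachi, yellowfin) convert
 * strcpy()/strncpy() to strlcpy() bounded by the size of the *destination*
 * field.  The old vxge code passed sizeof(VXGE_DRIVER_NAME), the size of the
 * source string, which gives no protection against overflowing info->driver.
 * The usual shape of the callback looks like this; the driver name and
 * version strings below are hypothetical.
 */
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define MY_DRV_NAME	"mydrv"
#define MY_DRV_VERSION	"1.0"

static void my_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	/* always bound the copy by the destination buffer */
	strlcpy(info->driver, MY_DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, MY_DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(dev->dev.parent),
		sizeof(info->bus_info));
}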
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); - memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); /* initialize number of multicast & unicast MAC entries variables */ if (sp->device_type == XFRAME_I_DEVICE) { diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index 92dd72d3f9de..f8f073880f84 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -82,9 +82,9 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct vxgedev *vdev = netdev_priv(dev); - strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); - strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); - strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); + strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info)); info->regdump_len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 7c87105ca049..794444e09492 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -4682,7 +4682,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) /* Store the fw version for ethttool option */ strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); - memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); /* Copy the station mac address to the list */ for (i = 0; i < vdev->no_of_vpath; i++) { diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index cbd6a529d0c0..162da8975b05 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -878,8 +878,8 @@ static int w90p910_ether_ioctl(struct net_device *dev, static void w90p910_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 87fa5919c455..0b8de12bcbca 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3055,7 +3055,6 @@ static int nv_set_mac_address(struct net_device *dev, void *addr) /* synchronized against open : rtnl_lock() held by caller */ memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); - dev->addr_assign_type &= ~NET_ADDR_RANDOM; if (netif_running(dev)) { netif_tx_lock_bh(dev); @@ -5766,9 +5765,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) "%s: set workaround bit for reversed mac addr\n", __func__); } - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - if (!is_valid_ether_addr(dev->perm_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { /* * Bad mac address. 
At least one bios sets the mac address * to 01:23:45:67:89:ab diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 3466ca1e8f6c..c4122c86f829 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -800,7 +800,7 @@ static int lpc_mii_probe(struct net_device *ndev) else netdev_info(ndev, "using RMII interface\n"); phydev = phy_connect(ndev, dev_name(&phydev->dev), - &lpc_handle_link_change, 0, + &lpc_handle_link_change, lpc_phy_interface_mode(&pldat->pdev->dev)); if (IS_ERR(phydev)) { @@ -1239,9 +1239,10 @@ static int lpc_eth_open(struct net_device *ndev) static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - strcpy(info->driver, MODNAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, dev_name(ndev->dev.parent)); + strlcpy(info->driver, MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(ndev->dev.parent), + sizeof(info->bus_info)); } static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev) diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index b5499198e029..921729f9c85c 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1350,10 +1350,10 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev) static void octeon_mgmt_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { - strncpy(info->driver, DRV_NAME, sizeof(info->driver)); - strncpy(info->version, DRV_VERSION, sizeof(info->version)); - strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strncpy(info->bus_info, "N/A", sizeof(info->bus_info)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); info->n_stats = 0; info->testinfo_len = 0; info->regdump_len = 0; @@ -1534,12 +1534,10 @@ static int octeon_mgmt_probe(struct platform_device *pdev) mac = of_get_mac_address(pdev->dev.of_node); - if (mac && is_valid_ether_addr(mac)) { + if (mac && is_valid_ether_addr(mac)) memcpy(netdev->dev_addr, mac, ETH_ALEN); - netdev->addr_assign_type &= ~NET_ADDR_RANDOM; - } else { + else eth_hw_addr_random(netdev); - } p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index bf829ee30077..cac33e5f9bc2 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1808,9 +1808,10 @@ static int check_if_running(struct net_device *dev) static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct hamachi_private *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(np->pci_dev)); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index fbaed4fa72fa..d28593b1fc3e 100644 --- 
a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -1326,9 +1326,10 @@ static void set_rx_mode(struct net_device *dev) static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct yellowfin_private *np = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(np->pci_dev)); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static const struct ethtool_ops ethtool_ops = { diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 7f556a84925d..1bcaf45aa864 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -201,11 +201,8 @@ netxen_setup_minidump(struct netxen_adapter *adapter) adapter->mdump.md_template = kmalloc(adapter->mdump.md_template_size, GFP_KERNEL); - if (!adapter->mdump.md_template) { - dev_err(&adapter->pdev->dev, "Unable to allocate memory " - "for minidump template.\n"); + if (!adapter->mdump.md_template) return -ENOMEM; - } err = netxen_get_minidump_template(adapter); if (err) { diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 946160fa5843..9fbb1cdbfa47 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -670,11 +670,9 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, } cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); - if (cur == NULL) { - printk(KERN_ERR "%s: failed to add mac address filter\n", - adapter->netdev->name); + if (cur == NULL) return -ENOMEM; - } + memcpy(cur->mac_addr, addr, ETH_ALEN); list_add_tail(&cur->list, &adapter->mac_list); return nx_p3_sre_macaddr_change(adapter, @@ -2568,16 +2566,10 @@ netxen_dump_fw(struct netxen_adapter *adapter) adapter->mdump.md_capture_size; if (!adapter->mdump.md_capture_buff) { adapter->mdump.md_capture_buff = - vmalloc(adapter->mdump.md_dump_size); - if (!adapter->mdump.md_capture_buff) { - dev_info(&adapter->pdev->dev, - "Unable to allocate memory for minidump " - "capture_buffer(%d bytes).\n", - adapter->mdump.md_dump_size); + vzalloc(adapter->mdump.md_dump_size); + if (!adapter->mdump.md_capture_buff) return; - } - memset(adapter->mdump.md_capture_buff, 0, - adapter->mdump.md_dump_size); + if (netxen_collect_minidump(adapter)) { adapter->mdump.has_valid_dump = 0; adapter->mdump.md_dump_size = 0; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 695667d471a1..4782dcfde736 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -197,41 +197,33 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) struct nx_host_sds_ring *sds_ring; struct nx_host_tx_ring *tx_ring; struct netxen_rx_buffer *rx_buf; - int ring, i, size; + int ring, i; struct netxen_cmd_buffer *cmd_buf_arr; struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - size = sizeof(struct nx_host_tx_ring); - tx_ring = kzalloc(size, GFP_KERNEL); - if (tx_ring == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n", - netdev->name); + tx_ring = kzalloc(sizeof(struct 
nx_host_tx_ring), GFP_KERNEL); + if (tx_ring == NULL) return -ENOMEM; - } + adapter->tx_ring = tx_ring; tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, 0); cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); - if (cmd_buf_arr == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", - netdev->name); + if (cmd_buf_arr == NULL) goto err_out; - } + tx_ring->cmd_buf_arr = cmd_buf_arr; recv_ctx = &adapter->recv_ctx; - size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring); - rds_ring = kzalloc(size, GFP_KERNEL); - if (rds_ring == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n", - netdev->name); + rds_ring = kcalloc(adapter->max_rds_rings, + sizeof(struct nx_host_rds_ring), GFP_KERNEL); + if (rds_ring == NULL) goto err_out; - } + recv_ctx->rds_rings = rds_ring; for (ring = 0; ring < adapter->max_rds_rings; ring++) { diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 69e321a65077..501f49207da5 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -501,12 +501,11 @@ netxen_read_mac_addr(struct netxen_adapter *adapter) for (i = 0; i < 6; i++) netdev->dev_addr[i] = *(p + 5 - i); - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); /* set station address */ - if (!is_valid_ether_addr(netdev->perm_addr)) + if (!is_valid_ether_addr(netdev->dev_addr)) dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); return 0; @@ -3177,11 +3176,8 @@ netxen_list_config_vlan_ip(struct netxen_adapter *adapter, } cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); - if (cur == NULL) { - printk(KERN_ERR "%s: failed to add vlan ip to list\n", - adapter->netdev->name); + if (cur == NULL) return; - } cur->ip_addr = ifa->ifa_address; list_add_tail(&cur->list, &adapter->vlan_ip_list); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 67a679aaf29a..8fd38cb6d26a 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2591,13 +2591,11 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) else qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; - qdev->lrg_buf = - kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), - GFP_KERNEL); - if (qdev->lrg_buf == NULL) { - netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); + qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, + sizeof(struct ql_rcv_buf_cb), + GFP_KERNEL); + if (qdev->lrg_buf == NULL) return -ENOMEM; - } qdev->lrg_buf_q_alloc_virt_addr = pci_alloc_consistent(qdev->pdev, @@ -3867,7 +3865,6 @@ static int ql3xxx_probe(struct pci_dev *pdev, ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); } - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile index c4b8ced83829..7722a203e388 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/Makefile +++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile @@ -6,4 +6,6 @@ obj-$(CONFIG_QLCNIC) := qlcnic.o qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \ - qlcnic_sysfs.o qlcnic_minidump.o + qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \ + 
qlcnic_83xx_init.o qlcnic_83xx_vnic.o \ + qlcnic_minidump.o diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index bc7ec64e9c7a..11c3db6daffd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1,6 +1,6 @@ /* * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation + * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ @@ -33,11 +33,13 @@ #include <linux/if_vlan.h> #include "qlcnic_hdr.h" +#include "qlcnic_hw.h" +#include "qlcnic_83xx_hw.h" #define _QLCNIC_LINUX_MAJOR 5 -#define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 30 -#define QLCNIC_LINUX_VERSIONID "5.0.30" +#define _QLCNIC_LINUX_MINOR 1 +#define _QLCNIC_LINUX_SUBVERSION 34 +#define QLCNIC_LINUX_VERSIONID "5.1.34" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -96,7 +98,6 @@ #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + MGMT_CMD_DESC_RESV) #define QLCNIC_MAX_TX_TIMEOUTS 2 - /* * Following are the states of the Phantom. Phantom will set them and * Host will read to check if the fields are correct. @@ -203,6 +204,7 @@ struct uni_data_desc{ /* Flash Defines and Structures */ #define QLCNIC_FLT_LOCATION 0x3F1000 +#define QLCNIC_FDT_LOCATION 0x3F0000 #define QLCNIC_B0_FW_IMAGE_REGION 0x74 #define QLCNIC_C0_FW_IMAGE_REGION 0x97 #define QLCNIC_BOOTLD_REGION 0X72 @@ -223,6 +225,36 @@ struct qlcnic_flt_entry { u32 end_addr; }; +/* Flash Descriptor Table */ +struct qlcnic_fdt { + u32 valid; + u16 ver; + u16 len; + u16 cksum; + u16 unused; + u8 model[16]; + u16 mfg_id; + u16 id; + u8 flag; + u8 erase_cmd; + u8 alt_erase_cmd; + u8 write_enable_cmd; + u8 write_enable_bits; + u8 write_statusreg_cmd; + u8 unprotected_sec_cmd; + u8 read_manuf_cmd; + u32 block_size; + u32 alt_block_size; + u32 flash_size; + u32 write_enable_data; + u8 readid_addr_len; + u8 write_disable_bits; + u8 read_dev_id_len; + u8 chip_erase_cmd; + u16 read_timeo; + u8 protected_sec_cmd; + u8 resvd[65]; +}; /* Magic number to let user know flash is programmed */ #define QLCNIC_BDINFO_MAGIC 0x12345678 @@ -267,6 +299,12 @@ struct qlcnic_flt_entry { extern char qlcnic_driver_name[]; +extern int qlcnic_use_msi; +extern int qlcnic_use_msi_x; +extern int qlcnic_auto_fw_reset; +extern int qlcnic_load_fw_file; +extern int qlcnic_config_npars; + /* Number of status descriptors to handle per interrupt */ #define MAX_STATUS_HANDLE (64) @@ -314,6 +352,7 @@ struct qlcnic_rx_buffer { #define QLCNIC_INTR_DEFAULT 0x04 #define QLCNIC_CONFIG_INTR_COALESCE 3 +#define QLCNIC_DEV_INFO_SIZE 1 struct qlcnic_nic_intr_coalesce { u8 type; @@ -337,6 +376,7 @@ struct qlcnic_dump_template_hdr { u32 sys_info[3]; u32 saved_state[16]; u32 cap_sizes[8]; + u32 ocm_wnd_reg[16]; u32 rsvd[0]; }; @@ -396,12 +436,24 @@ struct qlcnic_hardware_context { u16 act_pci_func; u32 capabilities; + u32 capabilities2; u32 temp; u32 int_vec_bit; u32 fw_hal_version; + u32 port_config; struct qlcnic_hardware_ops *hw_ops; struct qlcnic_nic_intr_coalesce coal; struct qlcnic_fw_dump fw_dump; + struct qlcnic_fdt fdt; + struct qlc_83xx_reset reset; + struct qlc_83xx_idc idc; + struct qlc_83xx_fw_info fw_info; + struct qlcnic_intrpt_config *intr_tbl; + u32 *reg_tbl; + u32 *ext_reg_tbl; + u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; + u32 mbox_reg[4]; + spinlock_t mbx_lock; }; struct qlcnic_adapter_stats { @@ -422,6 
+474,8 @@ struct qlcnic_adapter_stats { u64 null_rxbuf; u64 rx_dma_map_error; u64 tx_dma_map_error; + u64 spurious_intr; + u64 mac_filter_limit_overrun; }; /* @@ -460,12 +514,17 @@ struct qlcnic_host_sds_ring { } ____cacheline_internodealigned_in_smp; struct qlcnic_host_tx_ring { + int irq; + void __iomem *crb_intr_mask; + char name[IFNAMSIZ+4]; u16 ctx_id; u32 producer; u32 sw_consumer; u32 num_desc; void __iomem *crb_cmd_producer; struct cmd_desc_type0 *desc_head; + struct qlcnic_adapter *adapter; + struct napi_struct napi; struct qlcnic_cmd_buffer *cmd_buf_arr; __le32 *hw_consumer; @@ -492,8 +551,6 @@ struct qlcnic_recv_context { /* HW context creation */ #define QLCNIC_OS_CRB_RETRY_COUNT 4000 -#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \ - (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) #define QLCNIC_CDRP_CMD_BIT 0x80000000 @@ -513,43 +570,6 @@ struct qlcnic_recv_context { * the crb QLCNIC_CDRP_CRB_OFFSET. */ #define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd)) -#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0) - -#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 -#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 -#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 -#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 -#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 -#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 -#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007 -#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008 -#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009 -#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a -#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011 -#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012 -#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013 -#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014 -#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015 -#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016 -#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017 -#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018 -#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019 -#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f - -#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 -#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 -#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 -#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 -#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 -#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a -#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E -#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f -#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 -#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037 #define QLCNIC_RCODE_SUCCESS 0 #define QLCNIC_RCODE_INVALID_ARGS 6 @@ -726,6 +746,11 @@ struct qlcnic_mac_list_s { uint8_t mac_addr[ETH_ALEN+2]; }; +/* MAC Learn */ +#define NO_MAC_LEARN 0 +#define DRV_MAC_LEARN 1 +#define FDB_MAC_LEARN 2 + #define QLCNIC_HOST_REQUEST 0x13 #define QLCNIC_REQUEST 0x14 @@ -762,7 +787,7 @@ struct qlcnic_mac_list_s { */ #define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f -#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 +#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D #define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ #define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ @@ -779,6 +804,8 @@ struct qlcnic_mac_list_s { #define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31 #define 
QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2 +#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 +#define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5 /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 @@ -855,7 +882,7 @@ struct qlcnic_ipaddr { #define QLCNIC_MSI_ENABLED 0x02 #define QLCNIC_MSIX_ENABLED 0x04 -#define QLCNIC_LRO_ENABLED 0x08 +#define QLCNIC_LRO_ENABLED 0x01 #define QLCNIC_LRO_DISABLED 0x00 #define QLCNIC_BRIDGE_ENABLED 0X10 #define QLCNIC_DIAG_ENABLED 0x20 @@ -887,6 +914,7 @@ struct qlcnic_ipaddr { #define __QLCNIC_AER 5 #define __QLCNIC_DIAG_RES_ALLOC 6 #define __QLCNIC_LED_ENABLE 7 +#define __QLCNIC_ELB_INPROGRESS 8 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -895,12 +923,14 @@ struct qlcnic_ipaddr { #define QLCNIC_FILTER_AGE 80 #define QLCNIC_READD_AGE 20 #define QLCNIC_LB_MAX_FILTERS 64 +#define QLCNIC_LB_BUCKET_SIZE 32 /* QLCNIC Driver Error Code */ #define QLCNIC_FW_NOT_RESPOND 51 #define QLCNIC_TEST_IN_PROGRESS 52 #define QLCNIC_UNDEFINED_ERROR 53 #define QLCNIC_LB_CABLE_NOT_CONN 54 +#define QLCNIC_ILB_MAX_RCV_LOOP 10 struct qlcnic_filter { struct hlist_node fnode; @@ -912,7 +942,8 @@ struct qlcnic_filter { struct qlcnic_filter_hash { struct hlist_head *fhead; u8 fnum; - u8 fmax; + u16 fmax; + u16 fbucket_size; }; struct qlcnic_adapter { @@ -934,6 +965,7 @@ struct qlcnic_adapter { u8 max_rds_rings; u8 max_sds_rings; + u8 rx_csum; u8 portnum; u8 fw_wait_cnt; @@ -954,8 +986,10 @@ struct qlcnic_adapter { u8 mac_addr[ETH_ALEN]; u64 dev_rst_time; - u8 mac_learn; + bool drv_mac_learn; + bool fdb_mac_learn; unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u8 flash_mfg_id; struct qlcnic_npar_info *npars; struct qlcnic_eswitch *eswitch; struct qlcnic_nic_template *nic_ops; @@ -969,12 +1003,17 @@ struct qlcnic_adapter { void __iomem *isr_int_vec; struct msix_entry *msix_entries; + struct workqueue_struct *qlcnic_wq; struct delayed_work fw_work; + struct delayed_work idc_aen_work; struct qlcnic_filter_hash fhash; + struct qlcnic_filter_hash rx_fhash; spinlock_t tx_clean_lock; spinlock_t mac_learn_lock; + /* spinlock for catching rcv filters for eswitch traffic */ + spinlock_t rx_mac_learn_lock; u32 file_prd_off; /*File fw product offset*/ u32 fw_version; const struct firmware *fw; @@ -995,7 +1034,24 @@ struct qlcnic_info_le { __le16 max_rx_ques; __le16 min_tx_bw; __le16 max_tx_bw; - u8 reserved2[104]; + __le32 op_type; + __le16 max_bw_reg_offset; + __le16 max_linkspeed_reg_offset; + __le32 capability1; + __le32 capability2; + __le32 capability3; + __le16 max_tx_mac_filters; + __le16 max_rx_mcast_mac_filters; + __le16 max_rx_ucast_mac_filters; + __le16 max_rx_ip_addr; + __le16 max_rx_lro_flow; + __le16 max_rx_status_rings; + __le16 max_rx_buf_rings; + __le16 max_tx_vlan_keys; + u8 total_pf; + u8 total_rss_engines; + __le16 max_vports; + u8 reserved2[64]; } __packed; struct qlcnic_info { @@ -1005,12 +1061,28 @@ struct qlcnic_info { u16 switch_mode; u32 capabilities; u8 max_mac_filters; - u8 reserved1; u16 max_mtu; u16 max_tx_ques; u16 max_rx_ques; u16 min_tx_bw; u16 max_tx_bw; + u32 op_type; + u16 max_bw_reg_offset; + u16 max_linkspeed_reg_offset; + u32 capability1; + u32 capability2; + u32 capability3; + u16 max_tx_mac_filters; + u16 max_rx_mcast_mac_filters; + u16 max_rx_ucast_mac_filters; + u16 max_rx_ip_addr; + u16 max_rx_lro_flow; + u16 max_rx_status_rings; + u16 max_rx_buf_rings; + u16 max_tx_vlan_keys; + u8 total_pf; + u8 total_rss_engines; + u16 max_vports; }; struct qlcnic_pci_info_le { @@ -1024,7 +1096,9 @@ struct qlcnic_pci_info_le { __le16 reserved1[2]; u8 
mac[ETH_ALEN]; - u8 reserved2[106]; + __le16 func_count; + u8 reserved2[104]; + } __packed; struct qlcnic_pci_info { @@ -1035,6 +1109,7 @@ struct qlcnic_pci_info { u16 tx_min_bw; u16 tx_max_bw; u8 mac[ETH_ALEN]; + u16 func_count; }; struct qlcnic_npar_info { @@ -1266,10 +1341,8 @@ struct qlcnic_esw_statistics { #define QLCNIC_RESET_QUIESCENT 0xadd00020 struct _cdrp_cmd { - u32 cmd; - u32 arg1; - u32 arg2; - u32 arg3; + u32 num; + u32 *arg; }; struct qlcnic_cmd_args { @@ -1279,9 +1352,6 @@ struct qlcnic_cmd_args { int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config); - -int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off); -int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *); @@ -1291,9 +1361,10 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64); (((addr) < (high)) && ((addr) >= (low))) #define QLCRD32(adapter, off) \ - (qlcnic_hw_read_wx_2M(adapter, off)) + (adapter->ahw->hw_ops->read_reg)(adapter, off) + #define QLCWR32(adapter, off, val) \ - (qlcnic_hw_write_wx_2M(adapter, off, val)) + adapter->ahw->hw_ops->write_reg(adapter, off, val) int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32); void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); @@ -1306,10 +1377,6 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID) #define qlcnic_phy_unlock(a) \ qlcnic_pcie_sem_unlock((a), 3) -#define qlcnic_api_lock(a) \ - qlcnic_pcie_sem_lock((a), 5, 0) -#define qlcnic_api_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 5) #define qlcnic_sw_lock(a) \ qlcnic_pcie_sem_lock((a), 6, 0) #define qlcnic_sw_unlock(a) \ @@ -1324,14 +1391,13 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); #define MAX_CTL_CHECK 1000 -int qlcnic_get_board_info(struct qlcnic_adapter *adapter); int qlcnic_wol_supported(struct qlcnic_adapter *adapter); -int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); int qlcnic_dump_fw(struct qlcnic_adapter *); /* Functions from qlcnic_init.c */ +void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int); int qlcnic_load_firmware(struct qlcnic_adapter *adapter); int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); void qlcnic_request_firmware(struct qlcnic_adapter *adapter); @@ -1361,54 +1427,42 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); void qlcnic_watchdog_task(struct work_struct *work); void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring); + struct qlcnic_host_rds_ring *rds_ring, u8 ring_id); int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); void qlcnic_set_multi(struct net_device *netdev); +int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *); +int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); -int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32); -int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter); -int qlcnic_config_rss(struct qlcnic_adapter *adapter, 
int enable); -int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd); -int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable); -void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *); int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features); int qlcnic_set_features(struct net_device *netdev, netdev_features_t features); -int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *); -void qlcnic_fetch_mac(u32, u32, u8, u8 *); -void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); -void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter); -int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode); /* Functions from qlcnic_ethtool.c */ -int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]); +int qlcnic_check_loopback_buff(unsigned char *, u8 []); +int qlcnic_do_lb_test(struct qlcnic_adapter *, u8); +int qlcnic_loopback_test(struct net_device *, u8); /* Functions from qlcnic_main.c */ int qlcnic_reset_context(struct qlcnic_adapter *); -void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *); void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); int qlcnic_diag_alloc_res(struct net_device *netdev, int test); netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val); -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data); -void qlcnic_dev_request_reset(struct qlcnic_adapter *); +int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); +int qlcnic_validate_max_rss(u8, u8); void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); - -/* Management functions */ -int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); -int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); -int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); -int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); +int qlcnic_enable_msix(struct qlcnic_adapter *, u32); /* eSwitch management functions */ int qlcnic_config_switch_port(struct qlcnic_adapter *, struct qlcnic_esw_func_cfg *); + int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *, struct qlcnic_esw_func_cfg *); int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); @@ -1418,14 +1472,12 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, struct __qlcnic_esw_statistics *); int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *); -extern int qlcnic_config_tso; -int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *); -void qlcnic_napi_del(struct qlcnic_adapter *adapter); -void qlcnic_napi_enable(struct qlcnic_adapter *adapter); -void qlcnic_napi_disable(struct qlcnic_adapter *adapter); +void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd); + int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int); void qlcnic_free_sds_rings(struct qlcnic_recv_context *); +void 
qlcnic_advert_link_change(struct qlcnic_adapter *, int); void qlcnic_free_tx_rings(struct qlcnic_adapter *); int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *); @@ -1433,6 +1485,9 @@ void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); +void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); +void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); + int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); void qlcnic_set_vlan_config(struct qlcnic_adapter *, @@ -1440,6 +1495,22 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *, void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *, struct qlcnic_esw_func_cfg *); +void qlcnic_down(struct qlcnic_adapter *, struct net_device *); +int qlcnic_up(struct qlcnic_adapter *, struct net_device *); +void __qlcnic_down(struct qlcnic_adapter *, struct net_device *); +void qlcnic_detach(struct qlcnic_adapter *); +void qlcnic_teardown_intr(struct qlcnic_adapter *); +int qlcnic_attach(struct qlcnic_adapter *); +int __qlcnic_up(struct qlcnic_adapter *, struct net_device *); +void qlcnic_restore_indev_addr(struct net_device *, unsigned long); + +int qlcnic_check_temp(struct qlcnic_adapter *); +int qlcnic_init_pci_info(struct qlcnic_adapter *); +int qlcnic_set_default_offload_settings(struct qlcnic_adapter *); +int qlcnic_reset_npar_config(struct qlcnic_adapter *); +int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); +void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, + __le16); /* * QLOGIC Board information */ @@ -1462,6 +1533,277 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) tx_ring->producer; } +struct qlcnic_nic_template { + int (*config_bridged_mode) (struct qlcnic_adapter *, u32); + int (*config_led) (struct qlcnic_adapter *, u32, u32); + int (*start_firmware) (struct qlcnic_adapter *); + int (*init_driver) (struct qlcnic_adapter *); + void (*request_reset) (struct qlcnic_adapter *, u32); + void (*cancel_idc_work) (struct qlcnic_adapter *); + int (*napi_add)(struct qlcnic_adapter *, struct net_device *); + void (*napi_del)(struct qlcnic_adapter *); + void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int); + irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *); +}; + +/* Adapter hardware abstraction */ +struct qlcnic_hardware_ops { + void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); + void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); + int (*read_reg) (struct qlcnic_adapter *, ulong); + int (*write_reg) (struct qlcnic_adapter *, ulong, u32); + void (*get_ocm_win) (struct qlcnic_hardware_context *); + int (*get_mac_address) (struct qlcnic_adapter *, u8 *); + int (*setup_intr) (struct qlcnic_adapter *, u8); + int (*alloc_mbx_args)(struct qlcnic_cmd_args *, + struct qlcnic_adapter *, u32); + int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*get_func_no) (struct qlcnic_adapter *); + int (*api_lock) (struct qlcnic_adapter *); + void (*api_unlock) (struct qlcnic_adapter *); + void (*add_sysfs) (struct qlcnic_adapter *); + void (*remove_sysfs) (struct qlcnic_adapter *); + void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *); + int (*create_rx_ctx) (struct qlcnic_adapter *); + int (*create_tx_ctx) (struct 
qlcnic_adapter *, + struct qlcnic_host_tx_ring *, int); + int (*setup_link_event) (struct qlcnic_adapter *, int); + int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8); + int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *); + int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *); + int (*change_macvlan) (struct qlcnic_adapter *, u8*, __le16, u8); + void (*napi_enable) (struct qlcnic_adapter *); + void (*napi_disable) (struct qlcnic_adapter *); + void (*config_intr_coal) (struct qlcnic_adapter *); + int (*config_rss) (struct qlcnic_adapter *, int); + int (*config_hw_lro) (struct qlcnic_adapter *, int); + int (*config_loopback) (struct qlcnic_adapter *, u8); + int (*clear_loopback) (struct qlcnic_adapter *, u8); + int (*config_promisc_mode) (struct qlcnic_adapter *, u32); + void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, __le16); + int (*get_board_info) (struct qlcnic_adapter *); +}; + +extern struct qlcnic_nic_template qlcnic_vf_ops; + +static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) +{ + return adapter->nic_ops->start_firmware(adapter); +} + +static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size); +} + +static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size); +} + +static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, + ulong off) +{ + return adapter->ahw->hw_ops->read_reg(adapter, off); +} + +static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, + ulong off, u32 data) +{ + return adapter->ahw->hw_ops->write_reg(adapter, off, data); +} + +static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, + u8 *mac) +{ + return adapter->ahw->hw_ops->get_mac_address(adapter, mac); +} + +static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) +{ + return adapter->ahw->hw_ops->setup_intr(adapter, num_intr); +} + +static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx, + struct qlcnic_adapter *adapter, u32 arg) +{ + return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg); +} + +static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd); +} + +static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->get_func_no(adapter); +} + +static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->api_lock(adapter); +} + +static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->api_unlock(adapter); +} + +static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->add_sysfs(adapter); +} + +static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->remove_sysfs(adapter); +} + +static inline void +qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +{ + sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring); +} + +static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->create_rx_ctx(adapter); +} + +static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *ptr, + int ring) +{ + return 
adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring); +} + +static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, + int enable) +{ + return adapter->ahw->hw_ops->setup_link_event(adapter, enable); +} + +static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *info, u8 id) +{ + return adapter->ahw->hw_ops->get_nic_info(adapter, info, id); +} + +static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *info) +{ + return adapter->ahw->hw_ops->get_pci_info(adapter, info); +} + +static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *info) +{ + return adapter->ahw->hw_ops->set_nic_info(adapter, info); +} + +static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, + u8 *addr, __le16 id, u8 cmd) +{ + return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd); +} + +static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + return adapter->nic_ops->napi_add(adapter, netdev); +} + +static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter) +{ + adapter->nic_ops->napi_del(adapter); +} + +static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->napi_enable(adapter); +} + +static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->napi_disable(adapter); +} + +static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->config_intr_coal(adapter); +} + +static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) +{ + return adapter->ahw->hw_ops->config_rss(adapter, enable); +} + +static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, + int enable) +{ + return adapter->ahw->hw_ops->config_hw_lro(adapter, enable); +} + +static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + return adapter->ahw->hw_ops->config_loopback(adapter, mode); +} + +static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + return adapter->ahw->hw_ops->config_loopback(adapter, mode); +} + +static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, + u32 mode) +{ + return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode); +} + +static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter, + u64 *addr, __le16 id) +{ + adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id); +} + +static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->get_board_info(adapter); +} + +static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, + u32 key) +{ + adapter->nic_ops->request_reset(adapter, key); +} + +static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter) +{ + adapter->nic_ops->cancel_idc_work(adapter); +} + +static inline irqreturn_t +qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter) +{ + return adapter->nic_ops->clear_legacy_intr(adapter); +} + +static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, + u32 rate) +{ + return adapter->nic_ops->config_led(adapter, state, rate); +} + +static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, + __be32 ip, int cmd) +{ + adapter->nic_ops->config_ipaddr(adapter, ip, cmd); +} + static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) { writel(0, sds_ring->crb_intr_mask); @@ -1480,12 +1822,6 @@ static 
inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) extern const struct ethtool_ops qlcnic_ethtool_ops; extern const struct ethtool_ops qlcnic_ethtool_failed_ops; -struct qlcnic_nic_template { - int (*config_bridged_mode) (struct qlcnic_adapter *, u32); - int (*config_led) (struct qlcnic_adapter *, u32, u32); - int (*start_firmware) (struct qlcnic_adapter *); -}; - #define QLCDB(adapter, lvl, _fmt, _args...) do { \ if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \ printk(KERN_INFO "%s: %s: " _fmt, \ @@ -1493,6 +1829,7 @@ struct qlcnic_nic_template { __func__, ##_args); \ } while (0) +#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) { @@ -1500,4 +1837,11 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false; } +static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false; +} + + #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c new file mode 100644 index 000000000000..cd5ae8813cb3 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -0,0 +1,3011 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include "qlcnic.h" +#include <linux/if_vlan.h> +#include <linux/ipv6.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> + +#define QLCNIC_MAX_TX_QUEUES 1 +#define RSS_HASHTYPE_IP_TCP 0x3 + +/* status descriptor mailbox data + * @phy_addr: physical address of buffer + * @sds_ring_size: buffer size + * @intrpt_id: interrupt id + * @intrpt_val: source of interrupt + */ +struct qlcnic_sds_mbx { + u64 phy_addr; + u8 rsvd1[16]; + u16 sds_ring_size; + u16 rsvd2[3]; + u16 intrpt_id; + u8 intrpt_val; + u8 rsvd3[5]; +} __packed; + +/* receive descriptor buffer data + * phy_addr_reg: physical address of regular buffer + * phy_addr_jmb: physical address of jumbo buffer + * reg_ring_sz: size of regular buffer + * reg_ring_len: no. of entries in regular buffer + * jmb_ring_len: no. 
of entries in jumbo buffer + * jmb_ring_sz: size of jumbo buffer + */ +struct qlcnic_rds_mbx { + u64 phy_addr_reg; + u64 phy_addr_jmb; + u16 reg_ring_sz; + u16 reg_ring_len; + u16 jmb_ring_sz; + u16 jmb_ring_len; +} __packed; + +/* host producers for regular and jumbo rings */ +struct __host_producer_mbx { + u32 reg_buf; + u32 jmb_buf; +} __packed; + +/* Receive context mailbox data outbox registers + * @state: state of the context + * @vport_id: virtual port id + * @context_id: receive context id + * @num_pci_func: number of pci functions of the port + * @phy_port: physical port id + */ +struct qlcnic_rcv_mbx_out { + u8 rcv_num; + u8 sts_num; + u16 ctx_id; + u8 state; + u8 num_pci_func; + u8 phy_port; + u8 vport_id; + u32 host_csmr[QLCNIC_MAX_RING_SETS]; + struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; +} __packed; + +struct qlcnic_add_rings_mbx_out { + u8 rcv_num; + u8 sts_num; + u16 ctx_id; + u32 host_csmr[QLCNIC_MAX_RING_SETS]; + struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; +} __packed; + +/* Transmit context mailbox inbox registers + * @phys_addr: DMA address of the transmit buffer + * @cnsmr_index: host consumer index + * @size: legth of transmit buffer ring + * @intr_id: interrput id + * @src: src of interrupt + */ +struct qlcnic_tx_mbx { + u64 phys_addr; + u64 cnsmr_index; + u16 size; + u16 intr_id; + u8 src; + u8 rsvd[3]; +} __packed; + +/* Transmit context mailbox outbox registers + * @host_prod: host producer index + * @ctx_id: transmit context id + * @state: state of the transmit context + */ +struct qlcnic_tx_mbx_out { + u32 host_prod; + u16 ctx_id; + u8 state; + u8 rsvd; +} __packed; + +static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { + {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, + {QLCNIC_CMD_CONFIG_INTRPT, 18, 34}, + {QLCNIC_CMD_CREATE_RX_CTX, 136, 27}, + {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, + {QLCNIC_CMD_CREATE_TX_CTX, 54, 18}, + {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1}, + {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1}, + {QLCNIC_CMD_INTRPT_TEST, 22, 12}, + {QLCNIC_CMD_SET_MTU, 3, 1}, + {QLCNIC_CMD_READ_PHY, 4, 2}, + {QLCNIC_CMD_WRITE_PHY, 5, 1}, + {QLCNIC_CMD_READ_HW_REG, 4, 1}, + {QLCNIC_CMD_GET_FLOW_CTL, 4, 2}, + {QLCNIC_CMD_SET_FLOW_CTL, 4, 1}, + {QLCNIC_CMD_READ_MAX_MTU, 4, 2}, + {QLCNIC_CMD_READ_MAX_LRO, 4, 2}, + {QLCNIC_CMD_MAC_ADDRESS, 4, 3}, + {QLCNIC_CMD_GET_PCI_INFO, 1, 66}, + {QLCNIC_CMD_GET_NIC_INFO, 2, 19}, + {QLCNIC_CMD_SET_NIC_INFO, 32, 1}, + {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3}, + {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3}, + {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1}, + {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, + {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1}, + {QLCNIC_CMD_CONFIG_PORT, 4, 1}, + {QLCNIC_CMD_TEMP_SIZE, 1, 4}, + {QLCNIC_CMD_GET_TEMP_HDR, 5, 5}, + {QLCNIC_CMD_GET_LINK_EVENT, 2, 1}, + {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3}, + {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1}, + {QLCNIC_CMD_CONFIGURE_RSS, 14, 1}, + {QLCNIC_CMD_CONFIGURE_LED, 2, 1}, + {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1}, + {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1}, + {QLCNIC_CMD_GET_STATISTICS, 2, 80}, + {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1}, + {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2}, + {QLCNIC_CMD_GET_LINK_STATUS, 2, 4}, + {QLCNIC_CMD_IDC_ACK, 5, 1}, + {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1}, + {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, + {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, + {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, + {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, +}; + +static const u32 qlcnic_83xx_ext_reg_tbl[] = { + 0x38CC, /* Global Reset */ 
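/* The qlcnic_83xx_mbx_tbl[] entries above pair each mailbox opcode with the
 * number of request and response registers it uses, so command buffers can
 * be sized from the table instead of being hard-coded per call site (compare
 * the new struct _cdrp_cmd { u32 num; u32 *arg; } in the qlcnic.h hunk
 * earlier).  The helper below is only a sketch of that idea, assuming
 * "qlcnic.h" and <linux/slab.h>: example_mbx_meta and example_alloc_args are
 * hypothetical names, and the driver's real qlcnic_83xx_alloc_mbx_args() may
 * differ in its details.
 */

struct example_mbx_meta {
	u32 cmd;	/* opcode, e.g. QLCNIC_CMD_SET_MTU */
	u16 in_num;	/* request mailbox registers */
	u16 out_num;	/* response mailbox registers */
};

static int example_alloc_args(struct qlcnic_cmd_args *mbx,
			      const struct example_mbx_meta *tbl,
			      int tbl_size, u32 opcode)
{
	int i;

	for (i = 0; i < tbl_size; i++) {
		if (tbl[i].cmd != opcode)
			continue;
		mbx->req.num = tbl[i].in_num;
		mbx->rsp.num = tbl[i].out_num;
		mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), GFP_KERNEL);
		if (!mbx->req.arg)
			return -ENOMEM;
		mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), GFP_KERNEL);
		if (!mbx->rsp.arg) {
			kfree(mbx->req.arg);
			return -ENOMEM;
		}
		return 0;
	}
	return -EINVAL;	/* unknown opcode */
}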
+ 0x38F0, /* Wildcard */ + 0x38FC, /* Informant */ + 0x3038, /* Host MBX ctrl */ + 0x303C, /* FW MBX ctrl */ + 0x355C, /* BOOT LOADER ADDRESS REG */ + 0x3560, /* BOOT LOADER SIZE REG */ + 0x3564, /* FW IMAGE ADDR REG */ + 0x1000, /* MBX intr enable */ + 0x1200, /* Default Intr mask */ + 0x1204, /* Default Interrupt ID */ + 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */ + 0x3784, /* QLC_83XX_IDC_DEV_STATE */ + 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */ + 0x378C, /* QLC_83XX_IDC_DRV_ACK */ + 0x3790, /* QLC_83XX_IDC_CTRL */ + 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */ + 0x3798, /* QLC_83XX_IDC_MIN_VERSION */ + 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */ + 0x37A0, /* QLC_83XX_IDC_PF_0 */ + 0x37A4, /* QLC_83XX_IDC_PF_1 */ + 0x37A8, /* QLC_83XX_IDC_PF_2 */ + 0x37AC, /* QLC_83XX_IDC_PF_3 */ + 0x37B0, /* QLC_83XX_IDC_PF_4 */ + 0x37B4, /* QLC_83XX_IDC_PF_5 */ + 0x37B8, /* QLC_83XX_IDC_PF_6 */ + 0x37BC, /* QLC_83XX_IDC_PF_7 */ + 0x37C0, /* QLC_83XX_IDC_PF_8 */ + 0x37C4, /* QLC_83XX_IDC_PF_9 */ + 0x37C8, /* QLC_83XX_IDC_PF_10 */ + 0x37CC, /* QLC_83XX_IDC_PF_11 */ + 0x37D0, /* QLC_83XX_IDC_PF_12 */ + 0x37D4, /* QLC_83XX_IDC_PF_13 */ + 0x37D8, /* QLC_83XX_IDC_PF_14 */ + 0x37DC, /* QLC_83XX_IDC_PF_15 */ + 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */ + 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */ + 0x37F0, /* QLC_83XX_DRV_OP_MODE */ + 0x37F4, /* QLC_83XX_VNIC_STATE */ + 0x3868, /* QLC_83XX_DRV_LOCK */ + 0x386C, /* QLC_83XX_DRV_UNLOCK */ + 0x3504, /* QLC_83XX_DRV_LOCK_ID */ + 0x34A4, /* QLC_83XX_ASIC_TEMP */ +}; + +static const u32 qlcnic_83xx_reg_tbl[] = { + 0x34A8, /* PEG_HALT_STAT1 */ + 0x34AC, /* PEG_HALT_STAT2 */ + 0x34B0, /* FW_HEARTBEAT */ + 0x3500, /* FLASH LOCK_ID */ + 0x3528, /* FW_CAPABILITIES */ + 0x3538, /* Driver active, DRV_REG0 */ + 0x3540, /* Device state, DRV_REG1 */ + 0x3544, /* Driver state, DRV_REG2 */ + 0x3548, /* Driver scratch, DRV_REG3 */ + 0x354C, /* Device partiton info, DRV_REG4 */ + 0x3524, /* Driver IDC ver, DRV_REG5 */ + 0x3550, /* FW_VER_MAJOR */ + 0x3554, /* FW_VER_MINOR */ + 0x3558, /* FW_VER_SUB */ + 0x359C, /* NPAR STATE */ + 0x35FC, /* FW_IMG_VALID */ + 0x3650, /* CMD_PEG_STATE */ + 0x373C, /* RCV_PEG_STATE */ + 0x37B4, /* ASIC TEMP */ + 0x356C, /* FW API */ + 0x3570, /* DRV OP MODE */ + 0x3850, /* FLASH LOCK */ + 0x3854, /* FLASH UNLOCK */ +}; + +static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { + .read_crb = qlcnic_83xx_read_crb, + .write_crb = qlcnic_83xx_write_crb, + .read_reg = qlcnic_83xx_rd_reg_indirect, + .write_reg = qlcnic_83xx_wrt_reg_indirect, + .get_mac_address = qlcnic_83xx_get_mac_address, + .setup_intr = qlcnic_83xx_setup_intr, + .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, + .mbx_cmd = qlcnic_83xx_mbx_op, + .get_func_no = qlcnic_83xx_get_func_no, + .api_lock = qlcnic_83xx_cam_lock, + .api_unlock = qlcnic_83xx_cam_unlock, + .add_sysfs = qlcnic_83xx_add_sysfs, + .remove_sysfs = qlcnic_83xx_remove_sysfs, + .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, + .create_rx_ctx = qlcnic_83xx_create_rx_ctx, + .create_tx_ctx = qlcnic_83xx_create_tx_ctx, + .setup_link_event = qlcnic_83xx_setup_link_event, + .get_nic_info = qlcnic_83xx_get_nic_info, + .get_pci_info = qlcnic_83xx_get_pci_info, + .set_nic_info = qlcnic_83xx_set_nic_info, + .change_macvlan = qlcnic_83xx_sre_macaddr_change, + .napi_enable = qlcnic_83xx_napi_enable, + .napi_disable = qlcnic_83xx_napi_disable, + .config_intr_coal = qlcnic_83xx_config_intr_coal, + .config_rss = qlcnic_83xx_config_rss, + .config_hw_lro = qlcnic_83xx_config_hw_lro, + .config_promisc_mode = qlcnic_83xx_nic_set_promisc, 
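/* The ops table being filled in here is what the new static inline wrappers
 * in qlcnic.h dispatch through: generic code keeps calling, say,
 * qlcnic_hw_read_wx_2M() or the QLCRD32() macro, and the 82xx or 83xx
 * implementation is selected at runtime via adapter->ahw->hw_ops.  A minimal
 * sketch of that call path follows; example_read_reg() is a hypothetical
 * caller, not a function from this patch.
 */

static u32 example_read_reg(struct qlcnic_adapter *adapter, ulong reg)
{
	/* resolves to qlcnic_83xx_rd_reg_indirect() on an 83xx adapter and,
	 * presumably, to the legacy window-register read on 82xx parts
	 * (that table is set up elsewhere in the series)
	 */
	return QLCRD32(adapter, reg);
}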
+ .change_l2_filter = qlcnic_83xx_change_l2_filter, + .get_board_info = qlcnic_83xx_get_port_info, +}; + +static struct qlcnic_nic_template qlcnic_83xx_ops = { + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_config_led, + .request_reset = qlcnic_83xx_idc_request_reset, + .cancel_idc_work = qlcnic_83xx_idc_exit, + .napi_add = qlcnic_83xx_napi_add, + .napi_del = qlcnic_83xx_napi_del, + .config_ipaddr = qlcnic_83xx_config_ipaddr, + .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, +}; + +void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw) +{ + ahw->hw_ops = &qlcnic_83xx_hw_ops; + ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl; + ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl; +} + +int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter) +{ + u32 fw_major, fw_minor, fw_build; + struct pci_dev *pdev = adapter->pdev; + + fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); + adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); + + dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n", + QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build); + + return adapter->fw_version; +} + +static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr) +{ + void __iomem *base; + u32 val; + + base = adapter->ahw->pci_base0 + + QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func); + writel(addr, base); + val = readl(base); + if (val != addr) + return -EIO; + + return 0; +} + +int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr) +{ + int ret; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + ret = __qlcnic_set_win_base(adapter, (u32) addr); + if (!ret) { + return QLCRDX(ahw, QLCNIC_WILDCARD); + } else { + dev_err(&adapter->pdev->dev, + "%s failed, addr = 0x%x\n", __func__, (int)addr); + return -EIO; + } +} + +int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, + u32 data) +{ + int err; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + err = __qlcnic_set_win_base(adapter, (u32) addr); + if (!err) { + QLCWRX(ahw, QLCNIC_WILDCARD, data); + return 0; + } else { + dev_err(&adapter->pdev->dev, + "%s failed, addr = 0x%x data = 0x%x\n", + __func__, (int)addr, data); + return err; + } +} + +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) +{ + int err, i, num_msix; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (!num_intr) + num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS; + num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), + num_intr)); + /* account for AEN interrupt MSI-X based interrupts */ + num_msix += 1; + num_msix += adapter->max_drv_tx_rings; + err = qlcnic_enable_msix(adapter, num_msix); + if (err == -ENOMEM) + return err; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + num_msix = adapter->ahw->num_msix; + else + num_msix = 1; + /* setup interrupt mapping table for fw */ + ahw->intr_tbl = vzalloc(num_msix * + sizeof(struct qlcnic_intrpt_config)); + if (!ahw->intr_tbl) + return -ENOMEM; + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + /* MSI-X enablement failed, use legacy interrupt */ + adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR; + adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK; + adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR; + adapter->msix_entries[0].vector = adapter->pdev->irq; + dev_info(&adapter->pdev->dev, "using legacy 
interrupt\n"); + } + + for (i = 0; i < num_msix; i++) { + if (adapter->flags & QLCNIC_MSIX_ENABLED) + ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX; + else + ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX; + ahw->intr_tbl[i].id = i; + ahw->intr_tbl[i].src = 0; + } + return 0; +} + +inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter) +{ + writel(0, adapter->tgt_mask_reg); +} + +/* Enable MSI-x and INT-x interrupts */ +void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + writel(0, sds_ring->crb_intr_mask); +} + +/* Disable MSI-x and INT-x interrupts */ +void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + writel(1, sds_ring->crb_intr_mask); +} + +inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter + *adapter) +{ + u32 mask; + + /* Mailbox in MSI-x mode and Legacy Interrupt share the same + * source register. We could be here before contexts are created + * and sds_ring->crb_intr_mask has not been initialized, calculate + * BAR offset for Interrupt Source Register + */ + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); + writel(0, adapter->ahw->pci_base0 + mask); +} + +inline void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter) +{ + u32 mask; + + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); + writel(1, adapter->ahw->pci_base0 + mask); +} + +static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + int i; + for (i = 0; i < cmd->rsp.num; i++) + cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i)); +} + +irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) +{ + u32 intr_val; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int retries = 0; + + intr_val = readl(adapter->tgt_status_reg); + + if (!QLC_83XX_VALID_INTX_BIT31(intr_val)) + return IRQ_NONE; + + if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) { + adapter->stats.spurious_intr++; + return IRQ_NONE; + } + /* The barrier is required to ensure writes to the registers */ + wmb(); + + /* clear the interrupt trigger control register */ + writel(0, adapter->isr_int_vec); + intr_val = readl(adapter->isr_int_vec); + do { + intr_val = readl(adapter->tgt_status_reg); + if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func) + break; + retries++; + } while (QLC_83XX_VALID_INTX_BIT30(intr_val) && + (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY)); + + return IRQ_HANDLED; +} + +static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) +{ + u32 resp, event; + unsigned long flags; + + spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); + + resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); + if (!(resp & QLCNIC_SET_OWNER)) + goto out; + + event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); + if (event & QLCNIC_MBX_ASYNC_EVENT) + qlcnic_83xx_process_aen(adapter); +out: + qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); + spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); +} + +irqreturn_t qlcnic_83xx_intr(int irq, void *data) +{ + struct qlcnic_adapter *adapter = data; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + + qlcnic_83xx_poll_process_aen(adapter); + + if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + ahw->diag_cnt++; + qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); + return IRQ_HANDLED; + } + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + 
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); + } else { + sds_ring = &adapter->recv_ctx->sds_rings[0]; + napi_schedule(&sds_ring->napi); + } + + return IRQ_HANDLED; +} + +irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + goto done; + + if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + +done: + adapter->ahw->diag_cnt++; + qlcnic_83xx_enable_intr(adapter, sds_ring); + + return IRQ_HANDLED; +} + +void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter) +{ + u32 val = 0, num_msix = adapter->ahw->num_msix - 1; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + num_msix = adapter->ahw->num_msix - 1; + else + num_msix = 0; + + QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val); + + qlcnic_83xx_disable_mbx_intr(adapter); + + msleep(20); + synchronize_irq(adapter->msix_entries[num_msix].vector); + free_irq(adapter->msix_entries[num_msix].vector, adapter); +} + +int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) +{ + irq_handler_t handler; + u32 val; + char name[32]; + int err = 0; + unsigned long flags = 0; + + if (!(adapter->flags & QLCNIC_MSI_ENABLED) && + !(adapter->flags & QLCNIC_MSIX_ENABLED)) + flags |= IRQF_SHARED; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + handler = qlcnic_83xx_handle_aen; + val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector; + snprintf(name, (IFNAMSIZ + 4), + "%s[%s]", "qlcnic", "aen"); + err = request_irq(val, handler, flags, name, adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "failed to register MBX interrupt\n"); + return err; + } + } else { + handler = qlcnic_83xx_intr; + val = adapter->msix_entries[0].vector; + err = request_irq(val, handler, flags, "qlcnic", adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "failed to register INTx interrupt\n"); + return err; + } + qlcnic_83xx_clear_legacy_intr_mask(adapter); + } + + /* Enable mailbox interrupt */ + qlcnic_83xx_enable_mbx_intrpt(adapter); + + return err; +} + +void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter) +{ + u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT); + adapter->ahw->pci_func = val & 0xf; +} + +int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter) +{ + void __iomem *addr; + u32 val, limit = 0; + + struct qlcnic_hardware_context *ahw = adapter->ahw; + + addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func); + do { + val = readl(addr); + if (val) { + /* write the function number to register */ + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, + ahw->pci_func); + return 0; + } + usleep_range(1000, 2000); + } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT); + + return -EIO; +} + +void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter) +{ + void __iomem *addr; + u32 val; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func); + val = readl(addr); +} + +void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + int ret; + u32 data; + + if (qlcnic_api_lock(adapter)) { + dev_err(&adapter->pdev->dev, + "%s: failed to acquire lock. addr offset 0x%x\n", + __func__, (u32)offset); + return; + } + + ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset); + qlcnic_api_unlock(adapter); + + if (ret == -EIO) { + dev_err(&adapter->pdev->dev, + "%s: failed. 
addr offset 0x%x\n", + __func__, (u32)offset); + return; + } + data = ret; + memcpy(buf, &data, size); +} + +void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + u32 data; + + memcpy(&data, buf, size); + qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data); +} + +int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) +{ + int status; + + status = qlcnic_83xx_get_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "Get Port Info failed\n"); + } else { + if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config)) + adapter->ahw->port_type = QLCNIC_XGBE; + else + adapter->ahw->port_type = QLCNIC_GBE; + + if (QLC_83XX_AUTONEG(adapter->ahw->port_config)) + adapter->ahw->link_autoneg = AUTONEG_ENABLE; + } + return status; +} + +void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter) +{ + u32 val; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8); + else + val = BIT_2; + + QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val); + qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); +} + +void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter, + const struct pci_device_id *ent) +{ + u32 op_mode, priv_level; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + ahw->fw_hal_version = 2; + qlcnic_get_func_no(adapter); + + /* Determine function privilege level */ + op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + if (op_mode == QLC_83XX_DEFAULT_OPMODE) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, + ahw->pci_func); + + if (priv_level == QLCNIC_NON_PRIV_FUNC) { + ahw->op_mode = QLCNIC_NON_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged function\n", + ahw->fw_hal_version); + adapter->nic_ops = &qlcnic_vf_ops; + } else { + adapter->nic_ops = &qlcnic_83xx_ops; + } +} + +static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, + u32 data[]); +static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, + u32 data[]); + +static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + int i; + + dev_info(&adapter->pdev->dev, + "Host MBX regs(%d)\n", cmd->req.num); + for (i = 0; i < cmd->req.num; i++) { + if (i && !(i % 8)) + pr_info("\n"); + pr_info("%08x ", cmd->req.arg[i]); + } + pr_info("\n"); + dev_info(&adapter->pdev->dev, + "FW MBX regs(%d)\n", cmd->rsp.num); + for (i = 0; i < cmd->rsp.num; i++) { + if (i && !(i % 8)) + pr_info("\n"); + pr_info("%08x ", cmd->rsp.arg[i]); + } + pr_info("\n"); +} + +/* Mailbox response for mac rcode */ +static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) +{ + u32 fw_data; + u8 mac_cmd_rcode; + + fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2)); + mac_cmd_rcode = (u8)fw_data; + if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE || + mac_cmd_rcode == QLC_83XX_MAC_PRESENT || + mac_cmd_rcode == QLC_83XX_MAC_ABSENT) + return QLCNIC_RCODE_SUCCESS; + return 1; +} + +static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) +{ + u32 data; + unsigned long wait_time = 0; + struct qlcnic_hardware_context *ahw = adapter->ahw; + /* wait for mailbox completion */ + do { + data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); + if (++wait_time > QLCNIC_MBX_TIMEOUT) { + data = QLCNIC_RCODE_TIMEOUT; + break; + } + mdelay(1); + } while (!data); + return data; +} + +int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + int i; + u16 opcode; + u8 mbx_err_code; + unsigned long flags; 
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + opcode = LSW(cmd->req.arg[0]); + if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { + dev_info(&adapter->pdev->dev, + "Mailbox cmd attempted, 0x%x\n", opcode); + dev_info(&adapter->pdev->dev, "Mailbox detached\n"); + return 0; + } + + spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); + mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + + if (mbx_val) { + QLCDB(adapter, DRV, + "Mailbox cmd attempted, 0x%x\n", opcode); + QLCDB(adapter, DRV, + "Mailbox not available, 0x%x, collect FW dump\n", + mbx_val); + cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; + spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); + return cmd->rsp.arg[0]; + } + + /* Fill in mailbox registers */ + mbx_cmd = cmd->req.arg[0]; + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + for (i = 1; i < cmd->req.num; i++) + writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i)); + + /* Signal FW about the impending command */ + QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); +poll: + rsp = qlcnic_83xx_mbx_poll(adapter); + if (rsp != QLCNIC_RCODE_TIMEOUT) { + /* Get the FW response data */ + fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); + if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { + qlcnic_83xx_process_aen(adapter); + mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (mbx_val) + goto poll; + } + mbx_err_code = QLCNIC_MBX_STATUS(fw_data); + rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); + opcode = QLCNIC_MBX_RSP(fw_data); + qlcnic_83xx_get_mbx_data(adapter, cmd); + + switch (mbx_err_code) { + case QLCNIC_MBX_RSP_OK: + case QLCNIC_MBX_PORT_RSP_OK: + rsp = QLCNIC_RCODE_SUCCESS; + break; + default: + if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) { + rsp = qlcnic_83xx_mac_rcode(adapter); + if (!rsp) + goto out; + } + dev_err(&adapter->pdev->dev, + "MBX command 0x%x failed with err:0x%x\n", + opcode, mbx_err_code); + rsp = mbx_err_code; + qlcnic_dump_mbx(adapter, cmd); + break; + } + goto out; + } + + dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n", + QLCNIC_MBX_RSP(mbx_cmd)); + rsp = QLCNIC_RCODE_TIMEOUT; +out: + /* clear fw mbx control register */ + QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); + spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); + return rsp; +} + +int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, + struct qlcnic_adapter *adapter, u32 type) +{ + int i, size; + u32 temp; + const struct qlcnic_mailbox_metadata *mbx_tbl; + + mbx_tbl = qlcnic_83xx_mbx_tbl; + size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); + for (i = 0; i < size; i++) { + if (type == mbx_tbl[i].cmd) { + mbx->req.num = mbx_tbl[i].in_args; + mbx->rsp.num = mbx_tbl[i].out_args; + mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->req.arg) + return -ENOMEM; + mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->rsp.arg) { + kfree(mbx->req.arg); + mbx->req.arg = NULL; + return -ENOMEM; + } + memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); + memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); + temp = adapter->ahw->fw_hal_version << 29; + mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); + break; + } + } + return 0; +} + +void qlcnic_83xx_idc_aen_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter; + struct qlcnic_cmd_args cmd; + int i, err = 0; + + adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work); + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); + + for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++) + cmd.req.arg[i] = adapter->ahw->mbox_aen[i]; + + err = 
qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, + "%s: Mailbox IDC ACK failed.\n", __func__); + qlcnic_free_mbx_args(&cmd); +} + +static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, + u32 data[]) +{ + dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n", + QLCNIC_MBX_RSP(data[0])); + clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status); + return; +} + +void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) +{ + u32 event[QLC_83XX_MBX_AEN_CNT]; + int i; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) + event[i] = readl(QLCNIC_MBX_FW(ahw, i)); + + switch (QLCNIC_MBX_RSP(event[0])) { + + case QLCNIC_MBX_LINK_EVENT: + qlcnic_83xx_handle_link_aen(adapter, event); + break; + case QLCNIC_MBX_COMP_EVENT: + qlcnic_83xx_handle_idc_comp_aen(adapter, event); + break; + case QLCNIC_MBX_REQUEST_EVENT: + for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) + adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]); + queue_delayed_work(adapter->qlcnic_wq, + &adapter->idc_aen_work, 0); + break; + case QLCNIC_MBX_TIME_EXTEND_EVENT: + break; + case QLCNIC_MBX_SFP_INSERT_EVENT: + dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n", + QLCNIC_MBX_RSP(event[0])); + break; + case QLCNIC_MBX_SFP_REMOVE_EVENT: + dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n", + QLCNIC_MBX_RSP(event[0])); + break; + default: + dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n", + QLCNIC_MBX_RSP(event[0])); + break; + } + + QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); +} + +static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) +{ + int index, i, err, sds_mbx_size; + u32 *buf, intrpt_id, intr_mask; + u16 context_id; + u8 num_sds; + struct qlcnic_cmd_args cmd; + struct qlcnic_host_sds_ring *sds; + struct qlcnic_sds_mbx sds_mbx; + struct qlcnic_add_rings_mbx_out *mbx_out; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + sds_mbx_size = sizeof(struct qlcnic_sds_mbx); + context_id = recv_ctx->context_id; + num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS); + ahw->hw_ops->alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_ADD_RCV_RINGS); + cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); + + /* set up status rings, mbx 2-81 */ + index = 2; + for (i = 8; i < adapter->max_sds_rings; i++) { + memset(&sds_mbx, 0, sds_mbx_size); + sds = &recv_ctx->sds_rings[i]; + sds->consumer = 0; + memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); + sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.sds_ring_size = sds->num_desc; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[i].id; + else + intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + sds_mbx.intrpt_id = intrpt_id; + else + sds_mbx.intrpt_id = 0xffff; + sds_mbx.intrpt_val = 0; + buf = &cmd.req.arg[index]; + memcpy(buf, &sds_mbx, sds_mbx_size); + index += sds_mbx_size / sizeof(u32); + } + + /* send the mailbox command */ + err = ahw->hw_ops->mbx_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to add rings %d\n", err); + goto out; + } + + mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1]; + index = 0; + /* status descriptor ring */ + for (i = 8; i < adapter->max_sds_rings; i++) { + sds = &recv_ctx->sds_rings[i]; + sds->crb_sts_consumer = ahw->pci_base0 + + mbx_out->host_csmr[index]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intr_mask = ahw->intr_tbl[i].src; + else + intr_mask 
= QLCRDX(ahw, QLCNIC_DEF_INT_MASK); + + sds->crb_intr_mask = ahw->pci_base0 + intr_mask; + index++; + } +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) +{ + int i, err, index, sds_mbx_size, rds_mbx_size; + u8 num_sds, num_rds; + u32 *buf, intrpt_id, intr_mask, cap = 0; + struct qlcnic_host_sds_ring *sds; + struct qlcnic_host_rds_ring *rds; + struct qlcnic_sds_mbx sds_mbx; + struct qlcnic_rds_mbx rds_mbx; + struct qlcnic_cmd_args cmd; + struct qlcnic_rcv_mbx_out *mbx_out; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_hardware_context *ahw = adapter->ahw; + num_rds = adapter->max_rds_rings; + + if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS) + num_sds = adapter->max_sds_rings; + else + num_sds = QLCNIC_MAX_RING_SETS; + + sds_mbx_size = sizeof(struct qlcnic_sds_mbx); + rds_mbx_size = sizeof(struct qlcnic_rds_mbx); + cap = QLCNIC_CAP0_LEGACY_CONTEXT; + + if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) + cap |= QLC_83XX_FW_CAP_LRO_MSS; + + /* set mailbox hdr and capabilities */ + qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CREATE_RX_CTX); + cmd.req.arg[1] = cap; + cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) | + (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16); + /* set up status rings, mbx 8-57/87 */ + index = QLC_83XX_HOST_SDS_MBX_IDX; + for (i = 0; i < num_sds; i++) { + memset(&sds_mbx, 0, sds_mbx_size); + sds = &recv_ctx->sds_rings[i]; + sds->consumer = 0; + memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); + sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.sds_ring_size = sds->num_desc; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[i].id; + else + intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + sds_mbx.intrpt_id = intrpt_id; + else + sds_mbx.intrpt_id = 0xffff; + sds_mbx.intrpt_val = 0; + buf = &cmd.req.arg[index]; + memcpy(buf, &sds_mbx, sds_mbx_size); + index += sds_mbx_size / sizeof(u32); + } + /* set up receive rings, mbx 88-111/135 */ + index = QLCNIC_HOST_RDS_MBX_IDX; + rds = &recv_ctx->rds_rings[0]; + rds->producer = 0; + memset(&rds_mbx, 0, rds_mbx_size); + rds_mbx.phy_addr_reg = rds->phys_addr; + rds_mbx.reg_ring_sz = rds->dma_size; + rds_mbx.reg_ring_len = rds->num_desc; + /* Jumbo ring */ + rds = &recv_ctx->rds_rings[1]; + rds->producer = 0; + rds_mbx.phy_addr_jmb = rds->phys_addr; + rds_mbx.jmb_ring_sz = rds->dma_size; + rds_mbx.jmb_ring_len = rds->num_desc; + buf = &cmd.req.arg[index]; + memcpy(buf, &rds_mbx, rds_mbx_size); + + /* send the mailbox command */ + err = ahw->hw_ops->mbx_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to create Rx ctx in firmware%d\n", err); + goto out; + } + mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1]; + recv_ctx->context_id = mbx_out->ctx_id; + recv_ctx->state = mbx_out->state; + recv_ctx->virt_port = mbx_out->vport_id; + dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n", + recv_ctx->context_id, recv_ctx->state); + /* Receive descriptor ring */ + /* Standard ring */ + rds = &recv_ctx->rds_rings[0]; + rds->crb_rcv_producer = ahw->pci_base0 + + mbx_out->host_prod[0].reg_buf; + /* Jumbo ring */ + rds = &recv_ctx->rds_rings[1]; + rds->crb_rcv_producer = ahw->pci_base0 + + mbx_out->host_prod[0].jmb_buf; + /* status descriptor ring */ + for (i = 0; i < num_sds; i++) { + sds = &recv_ctx->sds_rings[i]; + sds->crb_sts_consumer = ahw->pci_base0 + + mbx_out->host_csmr[i]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + 
intr_mask = ahw->intr_tbl[i].src; + else + intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); + sds->crb_intr_mask = ahw->pci_base0 + intr_mask; + } + + if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS) + err = qlcnic_83xx_add_rings(adapter); +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx, int ring) +{ + int err; + u16 msix_id; + u32 *buf, intr_mask; + struct qlcnic_cmd_args cmd; + struct qlcnic_tx_mbx mbx; + struct qlcnic_tx_mbx_out *mbx_out; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + /* Reset host resources */ + tx->producer = 0; + tx->sw_consumer = 0; + *(tx->hw_consumer) = 0; + + memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx)); + + /* set up mailbox inbox registers */ + mbx.phys_addr = tx->phys_addr; + mbx.cnsmr_index = tx->hw_cons_phys_addr; + mbx.size = tx->num_desc; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id; + else + msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + mbx.intr_id = msix_id; + else + mbx.intr_id = 0xffff; + mbx.src = 0; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; + cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES; + buf = &cmd.req.arg[6]; + memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); + /* send the mailbox command */ + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to create Tx ctx in firmware 0x%x\n", err); + goto out; + } + mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2]; + tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod; + tx->ctx_id = mbx_out->ctx_id; + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src; + tx->crb_intr_mask = ahw->pci_base0 + intr_mask; + } + dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n", + tx->ctx_id, mbx_out->state); +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_ring; + u8 ring; + int ret; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + adapter->max_sds_rings = 1; + adapter->ahw->diag_test = test; + adapter->ahw->linkup = 0; + + ret = qlcnic_attach(adapter); + if (ret) { + netif_device_attach(netdev); + return ret; + } + + ret = qlcnic_fw_create_ctx(adapter); + if (ret) { + qlcnic_detach(adapter); + netif_device_attach(netdev); + return ret; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring, ring); + } + + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_83xx_enable_intr(adapter, sds_ring); + } + } + + if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { + /* disable and free mailbox interrupt */ + qlcnic_83xx_free_mbx_intr(adapter); + adapter->ahw->loopback_state = 0; + adapter->ahw->hw_ops->setup_link_event(adapter, 1); + } + + set_bit(__QLCNIC_DEV_UP, &adapter->state); + return 0; +} + +static void qlcnic_83xx_diag_free_res(struct net_device *netdev, + int max_sds_rings) +{ + struct
qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + int ring, err; + + clear_bit(__QLCNIC_DEV_UP, &adapter->state); + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_83xx_disable_intr(adapter, sds_ring); + } + } + + qlcnic_fw_destroy_ctx(adapter); + qlcnic_detach(adapter); + + if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: failed to setup mbx interrupt\n", + __func__); + goto out; + } + } + adapter->ahw->diag_test = 0; + adapter->max_sds_rings = max_sds_rings; + + if (qlcnic_attach(adapter)) + goto out; + + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); +out: + netif_device_attach(netdev); +} + +int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, + u32 beacon) +{ + struct qlcnic_cmd_args cmd; + u32 mbx_in; + int i, status = 0; + + if (state) { + /* Get LED configuration */ + qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_LED_CONFIG); + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) { + dev_err(&adapter->pdev->dev, + "Get led config failed.\n"); + goto mbx_err; + } else { + for (i = 0; i < 4; i++) + adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1]; + } + qlcnic_free_mbx_args(&cmd); + /* Set LED Configuration */ + mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) | + LSW(QLC_83XX_LED_CONFIG); + qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + cmd.req.arg[1] = mbx_in; + cmd.req.arg[2] = mbx_in; + cmd.req.arg[3] = mbx_in; + if (beacon) + cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON; + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) { + dev_err(&adapter->pdev->dev, + "Set led config failed.\n"); + } +mbx_err: + qlcnic_free_mbx_args(&cmd); + return status; + + } else { + /* Restoring default LED configuration */ + qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + cmd.req.arg[1] = adapter->ahw->mbox_reg[0]; + cmd.req.arg[2] = adapter->ahw->mbox_reg[1]; + cmd.req.arg[3] = adapter->ahw->mbox_reg[2]; + if (beacon) + cmd.req.arg[4] = adapter->ahw->mbox_reg[3]; + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) + dev_err(&adapter->pdev->dev, + "Restoring led config failed.\n"); + qlcnic_free_mbx_args(&cmd); + return status; + } +} + +void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, + int enable) +{ + struct qlcnic_cmd_args cmd; + int status; + + if (enable) { + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); + cmd.req.arg[1] = BIT_0 | BIT_31; + } else { + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); + cmd.req.arg[1] = BIT_0 | BIT_31; + } + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) + dev_err(&adapter->pdev->dev, + "Failed to %s in NIC IDC function event.\n", + (enable ? 
"register" : "unregister")); + + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); + cmd.req.arg[1] = adapter->ahw->port_config; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, "Set Port Config failed.\n"); + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, "Get Port config failed\n"); + else + adapter->ahw->port_config = cmd.rsp.arg[1]; + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable) +{ + int err; + u32 temp; + struct qlcnic_cmd_args cmd; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); + temp = adapter->recv_ctx->context_id << 16; + cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, + "Setup linkevent mailbox failed\n"); + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) +{ + int err; + u32 temp; + struct qlcnic_cmd_args cmd; + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return -EIO; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); + temp = adapter->recv_ctx->context_id << 16; + cmd.req.arg[1] = (mode ? 1 : 0) | temp; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, + "Promiscous mode config failed\n"); + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings; + + QLCDB(adapter, DRV, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); + if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + dev_warn(&adapter->pdev->dev, + "Loopback test not supported for non privilege function\n"); + return ret; + } + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); + if (ret) + goto fail_diag_alloc; + + ret = qlcnic_83xx_set_lb_mode(adapter, mode); + if (ret) + goto free_diag_res; + + /* Poll for link up event before running traffic */ + do { + msleep(500); + qlcnic_83xx_process_aen(adapter); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { + dev_info(&adapter->pdev->dev, + "Firmware didn't sent link up event to loopback request\n"); + ret = -QLCNIC_FW_NOT_RESPOND; + qlcnic_83xx_clear_lb_mode(adapter, mode); + goto free_diag_res; + } + } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); + + ret = qlcnic_do_lb_test(adapter, mode); + + qlcnic_83xx_clear_lb_mode(adapter, mode); + +free_diag_res: + qlcnic_83xx_diag_free_res(netdev, max_sds_rings); + +fail_diag_alloc: + adapter->max_sds_rings = max_sds_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int status = 0, loop = 0; + u32 config; + + status = qlcnic_83xx_get_port_config(adapter); + if (status) + return status; + + config = ahw->port_config; + set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + + if (mode == QLCNIC_ILB_MODE) + ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS; + if (mode == QLCNIC_ELB_MODE) + ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT; + + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "Failed to Set Loopback Mode = 0x%x.\n", + ahw->port_config); + ahw->port_config = config; + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return status; + } + + /* Wait for Link and IDC Completion AEN */ + do { + msleep(300); + qlcnic_83xx_process_aen(adapter); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { + dev_err(&adapter->pdev->dev, + "FW did not generate IDC completion AEN\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + qlcnic_83xx_clear_lb_mode(adapter, mode); + return -EIO; + } + } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); + + qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0, + QLCNIC_MAC_ADD); + return status; +} + +int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int status = 0, loop = 0; + u32 config = ahw->port_config; + + set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + if (mode == QLCNIC_ILB_MODE) + ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS; + if (mode == QLCNIC_ELB_MODE) + ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT; + + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "Failed to Clear Loopback Mode = 0x%x.\n", + ahw->port_config); + ahw->port_config = config; + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return status; + } + + /* Wait for Link and IDC Completion AEN */ + do { + msleep(300); + qlcnic_83xx_process_aen(adapter); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { + dev_err(&adapter->pdev->dev, + "Firmware didn't sent IDC completion AEN\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -EIO; + } + } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); + + qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0, + QLCNIC_MAC_DEL); + return 
status; +} + +void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, + int mode) +{ + int err; + u32 temp, temp_ip; + struct qlcnic_cmd_args cmd; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); + if (mode == QLCNIC_IP_UP) { + temp = adapter->recv_ctx->context_id << 16; + cmd.req.arg[1] = 1 | temp; + } else { + temp = adapter->recv_ctx->context_id << 16; + cmd.req.arg[1] = 2 | temp; + } + + /* + * Adapter needs IP address in network byte order. + * But hardware mailbox registers go through writel(), hence IP address + * gets swapped on big endian architecture. + * To negate swapping of writel() on big endian architecture + * use swab32(value). + */ + + temp_ip = swab32(ntohl(ip)); + memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32)); + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) + dev_err(&adapter->netdev->dev, + "could not notify %s IP 0x%x request\n", + (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip); + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode) +{ + int err; + u32 temp, arg1; + struct qlcnic_cmd_args cmd; + int lro_bit_mask; + + lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0); + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return 0; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); + temp = adapter->recv_ctx->context_id << 16; + arg1 = lro_bit_mask | temp; + cmd.req.arg[1] = arg1; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, "LRO config failed\n"); + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) +{ + int err; + u32 word; + struct qlcnic_cmd_args cmd; + const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL }; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); + + /* + * RSS request: + * bits 3-0: Rsvd + * 5-4: hash_type_ipv4 + * 7-6: hash_type_ipv6 + * 8: enable + * 9: use indirection table + * 16-31: indirection table mask + */ + word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | + ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | + ((u32)(enable & 0x1) << 8) | + ((0x7ULL) << 16); + cmd.req.arg[1] = (adapter->recv_ctx->context_id); + cmd.req.arg[2] = word; + memcpy(&cmd.req.arg[4], key, sizeof(key)); + + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err) + dev_info(&adapter->pdev->dev, "RSS config failed\n"); + qlcnic_free_mbx_args(&cmd); + + return err; + +} + +int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, + __le16 vlan_id, u8 op) +{ + int err; + u32 *buf; + struct qlcnic_cmd_args cmd; + struct qlcnic_macvlan_mbx mv; + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return -EIO; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); + if (err) + return err; + cmd.req.arg[1] = op | (1 << 8) | + (adapter->recv_ctx->context_id << 16); + + mv.vlan = le16_to_cpu(vlan_id); + memcpy(&mv.mac, addr, ETH_ALEN); + buf = &cmd.req.arg[2]; + memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "MAC-VLAN %s to CAM failed, err=%d.\n", + ((op == 1) ? 
"add " : "delete "), err); + qlcnic_free_mbx_args(&cmd); + return err; +} + +void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, + __le16 vlan_id) +{ + u8 mac[ETH_ALEN]; + memcpy(&mac, addr, ETH_ALEN); + qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD); +} + +void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac, + u8 type, struct qlcnic_cmd_args *cmd) +{ + switch (type) { + case QLCNIC_SET_STATION_MAC: + case QLCNIC_SET_FAC_DEF_MAC: + memcpy(&cmd->req.arg[2], mac, sizeof(u32)); + memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16)); + break; + } + cmd->req.arg[1] = type; +} + +int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) +{ + int err, i; + struct qlcnic_cmd_args cmd; + u32 mac_low, mac_high; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd); + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err == QLCNIC_RCODE_SUCCESS) { + mac_low = cmd.rsp.arg[1]; + mac_high = cmd.rsp.arg[2]; + + for (i = 0; i < 2; i++) + mac[i] = (u8) (mac_high >> ((1 - i) * 8)); + for (i = 2; i < 6; i++) + mac[i] = (u8) (mac_low >> ((5 - i) * 8)); + } else { + dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n", + err); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + return err; +} + +void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter) +{ + int err; + u32 temp; + struct qlcnic_cmd_args cmd; + struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); + cmd.req.arg[1] = 1 | (adapter->recv_ctx->context_id << 16); + cmd.req.arg[3] = coal->flag; + temp = coal->rx_time_us << 16; + cmd.req.arg[2] = coal->rx_packets | temp; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) + dev_info(&adapter->pdev->dev, + "Failed to send interrupt coalescence parameters\n"); + qlcnic_free_mbx_args(&cmd); +} + +static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, + u32 data[]) +{ + u8 link_status, duplex; + /* link speed */ + link_status = LSB(data[3]) & 1; + adapter->ahw->link_speed = MSW(data[2]); + adapter->ahw->link_autoneg = MSB(MSW(data[3])); + adapter->ahw->module_type = MSB(LSW(data[3])); + duplex = LSB(MSW(data[3])); + if (duplex) + adapter->ahw->link_duplex = DUPLEX_FULL; + else + adapter->ahw->link_duplex = DUPLEX_HALF; + adapter->ahw->has_link_events = 1; + qlcnic_advert_link_change(adapter, link_status); +} + +irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) +{ + struct qlcnic_adapter *adapter = data; + unsigned long flags; + u32 mask, resp, event; + + spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); + resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); + if (!(resp & QLCNIC_SET_OWNER)) + goto out; + + event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); + if (event & QLCNIC_MBX_ASYNC_EVENT) + qlcnic_83xx_process_aen(adapter); +out: + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); + writel(0, adapter->ahw->pci_base0 + mask); + spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); + + return IRQ_HANDLED; +} + +int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable) +{ + int err = -EIO; + struct qlcnic_cmd_args cmd; + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { + dev_err(&adapter->pdev->dev, + "%s: Error, invoked by non management func\n", + __func__); + return err; + } + + qlcnic_alloc_mbx_args(&cmd, adapter, 
QLCNIC_CMD_TOGGLE_ESWITCH); + cmd.req.arg[1] = (port & 0xf) | BIT_4; + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, "Failed to enable eswitch%d\n", + err); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + + return err; + +} + +int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *nic) +{ + int i, err = -EIO; + struct qlcnic_cmd_args cmd; + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { + dev_err(&adapter->pdev->dev, + "%s: Error, invoked by non management func\n", + __func__); + return err; + } + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + cmd.req.arg[1] = (nic->pci_func << 16); + cmd.req.arg[2] = 0x1 << 16; + cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16); + cmd.req.arg[4] = nic->capabilities; + cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16); + cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16); + cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16); + for (i = 8; i < 32; i++) + cmd.req.arg[i] = 0; + + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, "Failed to set nic info%d\n", + err); + err = -EIO; + } + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, u8 func_id) +{ + int err; + u32 temp; + u8 op = 0; + struct qlcnic_cmd_args cmd; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (func_id != adapter->ahw->pci_func) { + temp = func_id << 16; + cmd.req.arg[1] = op | BIT_31 | temp; + } else { + cmd.req.arg[1] = adapter->ahw->pci_func << 16; + } + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_info(&adapter->pdev->dev, + "Failed to get nic info %d\n", err); + goto out; + } + + npar_info->op_type = cmd.rsp.arg[1]; + npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF; + npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16; + npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF; + npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16; + npar_info->capabilities = cmd.rsp.arg[4]; + npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF; + npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16; + npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF; + npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16; + npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF; + npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16; + if (cmd.rsp.arg[8] & 0x1) + npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1; + if (cmd.rsp.arg[8] & 0x10000) { + temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; + npar_info->max_linkspeed_reg_offset = temp; + } + +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *pci_info) +{ + int i, err = 0, j = 0; + u32 temp; + struct qlcnic_cmd_args cmd; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + err = qlcnic_issue_cmd(adapter, &cmd); + + adapter->ahw->act_pci_func = 0; + if (err == QLCNIC_RCODE_SUCCESS) { + pci_info->func_count = cmd.rsp.arg[1] & 0xFF; + dev_info(&adapter->pdev->dev, + "%s: total functions = %d\n", + __func__, pci_info->func_count); + for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) { + pci_info->id = cmd.rsp.arg[i] & 0xFFFF; + pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; + i++; + pci_info->type = cmd.rsp.arg[i] & 0xFFFF; + if (pci_info->type == QLCNIC_TYPE_NIC) + 
adapter->ahw->act_pci_func++; + temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; + pci_info->default_port = temp; + i++; + pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF; + temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; + pci_info->tx_max_bw = temp; + i = i + 2; + memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2); + i++; + memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); + i = i + 3; + + dev_info(&adapter->pdev->dev, "%s:\n" + "\tid = %d active = %d type = %d\n" + "\tport = %d min bw = %d max bw = %d\n" + "\tmac_addr = %pM\n", __func__, + pci_info->id, pci_info->active, pci_info->type, + pci_info->default_port, pci_info->tx_min_bw, + pci_info->tx_max_bw, pci_info->mac); + } + } else { + dev_err(&adapter->pdev->dev, "Failed to get PCI Info%d\n", + err); + err = -EIO; + } + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) +{ + int i, index, err; + bool type; + u8 max_ints; + u32 val, temp; + struct qlcnic_cmd_args cmd; + + max_ints = adapter->ahw->num_msix - 1; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); + cmd.req.arg[1] = max_ints; + for (i = 0, index = 2; i < max_ints; i++) { + type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; + val = type | (adapter->ahw->intr_tbl[i].type << 4); + if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) + val |= (adapter->ahw->intr_tbl[i].id << 16); + cmd.req.arg[index++] = val; + } + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to configure interrupts 0x%x\n", err); + goto out; + } + + max_ints = cmd.rsp.arg[1]; + for (i = 0, index = 2; i < max_ints; i++, index += 2) { + val = cmd.rsp.arg[index]; + if (LSB(val)) { + dev_info(&adapter->pdev->dev, + "Can't configure interrupt %d\n", + adapter->ahw->intr_tbl[i].id); + continue; + } + if (op_type) { + adapter->ahw->intr_tbl[i].id = MSW(val); + adapter->ahw->intr_tbl[i].enabled = 1; + temp = cmd.rsp.arg[index + 1]; + adapter->ahw->intr_tbl[i].src = temp; + } else { + adapter->ahw->intr_tbl[i].id = i; + adapter->ahw->intr_tbl[i].enabled = 0; + adapter->ahw->intr_tbl[i].src = 0; + } + } +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter) +{ + int id, timeout = 0; + u32 status = 0; + + while (status == 0) { + status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK); + if (status) + break; + + if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) { + id = QLC_SHARED_REG_RD32(adapter, + QLCNIC_FLASH_LOCK_OWNER); + dev_err(&adapter->pdev->dev, + "%s: failed, lock held by %d\n", __func__, id); + return -EIO; + } + usleep_range(1000, 2000); + } + + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum); + return 0; +} + +void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter) +{ + QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK); + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF); +} + +int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, + u32 flash_addr, u8 *p_data, + int count) +{ + int i, ret; + u32 word, range, flash_offset, addr = flash_addr; + ulong indirect_add, direct_window; + + flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1); + if (addr & 0x3) { + dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr); + return -EIO; + } + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, + (addr)); + + range = flash_offset + (count * sizeof(u32)); + /* Check if data is spread across multiple sectors */ + if (range > 
(QLCNIC_FLASH_SECTOR_SIZE - 1)) { + + /* Multi sector read */ + for (i = 0; i < count; i++) { + indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); + ret = qlcnic_83xx_rd_reg_indirect(adapter, + indirect_add); + if (ret == -EIO) + return -EIO; + + word = ret; + *(u32 *)p_data = word; + p_data = p_data + 4; + addr = addr + 4; + flash_offset = flash_offset + 4; + + if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) { + direct_window = QLC_83XX_FLASH_DIRECT_WINDOW; + /* This write is needed once for each sector */ + qlcnic_83xx_wrt_reg_indirect(adapter, + direct_window, + (addr)); + flash_offset = 0; + } + } + } else { + /* Single sector read */ + for (i = 0; i < count; i++) { + indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); + ret = qlcnic_83xx_rd_reg_indirect(adapter, + indirect_add); + if (ret == -EIO) + return -EIO; + + word = ret; + *(u32 *)p_data = word; + p_data = p_data + 4; + addr = addr + 4; + } + } + + return 0; +} + +static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter) +{ + u32 status; + int retries = QLC_83XX_FLASH_READ_RETRY_COUNT; + + do { + status = qlcnic_83xx_rd_reg_indirect(adapter, + QLC_83XX_FLASH_STATUS); + if ((status & QLC_83XX_FLASH_STATUS_READY) == + QLC_83XX_FLASH_STATUS_READY) + break; + + msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY); + } while (--retries); + + if (!retries) + return -EIO; + + return 0; +} + +static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter) +{ + int ret; + u32 cmd; + cmd = adapter->ahw->fdt.write_statusreg_cmd; + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd)); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + adapter->ahw->fdt.write_enable_bits); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_SECOND_ERASE_MS_VAL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) + return -EIO; + + return 0; +} + +static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter) +{ + int ret; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | + adapter->ahw->fdt.write_statusreg_cmd)); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + adapter->ahw->fdt.write_disable_bits); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_SECOND_ERASE_MS_VAL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) + return -EIO; + + return 0; +} + +int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) +{ + int ret, mfg_id; + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_READ_CTRL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA); + if (mfg_id == -EIO) + return -EIO; + + adapter->flash_mfg_id = (mfg_id & 0xFF); + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter) +{ + int count, fdt_size, ret = 0; + + fdt_size = sizeof(struct qlcnic_fdt); + count = fdt_size / sizeof(u32); + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + memset(&adapter->ahw->fdt, 0, fdt_size); + ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, + (u8 *)&adapter->ahw->fdt, + count); + + 
qlcnic_83xx_unlock_flash(adapter); + return ret; +} + +int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, + u32 sector_start_addr) +{ + u32 reversed_addr, addr1, addr2, cmd; + int ret = -EIO; + + if (qlcnic_83xx_lock_flash(adapter) != 0) + return -EIO; + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write_op(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", + __func__, __LINE__); + return ret; + } + } + + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + addr1 = (sector_start_addr & 0xFF) << 16; + addr2 = (sector_start_addr & 0xFF0000) >> 16; + reversed_addr = addr1 | addr2; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + reversed_addr); + cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd; + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd); + else + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_OEM_ERASE_SIG); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_LAST_ERASE_MS_VAL); + + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write_op(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return ret; + } + } + + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr, + u32 *p_data) +{ + int ret = -EIO; + u32 addr1 = 0x00800000 | (addr >> 2); + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_LAST_ERASE_MS_VAL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + return 0; +} + +int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr, + u32 *p_data, int count) +{ + u32 temp; + int ret = -EIO; + + if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) || + (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) { + dev_err(&adapter->pdev->dev, + "%s: Invalid word count\n", __func__); + return -EIO; + } + + temp = qlcnic_83xx_rd_reg_indirect(adapter, + QLC_83XX_FLASH_SPI_CONTROL); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, + (temp | QLC_83XX_FLASH_SPI_CTRL)); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_ADDR_TEMP_VAL); + + /* First DWORD write */ + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_FIRST_MS_PATTERN); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + count--; + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL); + /* Second to N-1 DWORD writes */ + while 
(count != 1) { + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + *p_data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_SECOND_MS_PATTERN); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + count--; + } + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_ADDR_TEMP_VAL | + (addr >> 2)); + /* Last DWORD write */ + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_LAST_MS_PATTERN); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS); + if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) { + dev_err(&adapter->pdev->dev, "%s: failed at %d\n", + __func__, __LINE__); + /* Operation failed, clear error bit */ + temp = qlcnic_83xx_rd_reg_indirect(adapter, + QLC_83XX_FLASH_SPI_CONTROL); + qlcnic_83xx_wrt_reg_indirect(adapter, + QLC_83XX_FLASH_SPI_CONTROL, + (temp | QLC_83XX_FLASH_SPI_CTRL)); + } + + return 0; +} + +static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter) +{ + u32 val, id; + + val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); + + /* Check if recovery need to be performed by the calling function */ + if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) { + val = val & ~0x3F; + val = val | ((adapter->portnum << 2) | + QLC_83XX_NEED_DRV_LOCK_RECOVERY); + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); + dev_info(&adapter->pdev->dev, + "%s: lock recovery initiated\n", __func__); + msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY); + val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); + id = ((val >> 2) & 0xF); + if (id == adapter->portnum) { + val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK; + val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS; + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); + /* Force release the lock */ + QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); + /* Clear recovery bits */ + val = val & ~0x3F; + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); + dev_info(&adapter->pdev->dev, + "%s: lock recovery completed\n", __func__); + } else { + dev_info(&adapter->pdev->dev, + "%s: func %d to resume lock recovery process\n", + __func__, id); + } + } else { + dev_info(&adapter->pdev->dev, + "%s: lock recovery initiated by other functions\n", + __func__); + } +} + +int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter) +{ + u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0; + int max_attempt = 0; + + while (status == 0) { + status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK); + if (status) + break; + + msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY); + i++; + + if (i == 1) + temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + + if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) { + val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + if (val == temp) { + id = val & 0xFF; + dev_info(&adapter->pdev->dev, + "%s: lock to be recovered from %d\n", + __func__, id); + qlcnic_83xx_recover_driver_lock(adapter); + i = 0; + max_attempt++; + } else { + dev_err(&adapter->pdev->dev, + "%s: failed to get lock\n", __func__); + return -EIO; + } + } + + /* Force exit from while loop after few attempts */ + if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) { + 
dev_err(&adapter->pdev->dev, + "%s: failed to get lock\n", __func__); + return -EIO; + } + } + + val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + lock_alive_counter = val >> 8; + lock_alive_counter++; + val = lock_alive_counter << 8 | adapter->portnum; + QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val); + + return 0; +} + +void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter) +{ + u32 val, lock_alive_counter, id; + + val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + id = val & 0xFF; + lock_alive_counter = val >> 8; + + if (id != adapter->portnum) + dev_err(&adapter->pdev->dev, + "%s:Warning func %d is unlocking lock owned by %d\n", + __func__, adapter->portnum, id); + + val = (lock_alive_counter << 8) | 0xFF; + QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val); + QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); +} + +int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, + u32 *data, u32 count) +{ + int i, j, ret = 0; + u32 temp; + + /* Check alignment */ + if (addr & 0xF) + return -EIO; + + mutex_lock(&adapter->ahw->mem_lock); + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0); + + for (i = 0; i < count; i++, addr += 16) { + if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX)) || + (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET, + QLCNIC_ADDR_DDR_NET_MAX)))) { + mutex_unlock(&adapter->ahw->mem_lock); + return -EIO; + } + + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr); + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO, + *data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI, + *data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO, + *data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI, + *data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL, + QLCNIC_TA_WRITE_ENABLE); + qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL, + QLCNIC_TA_WRITE_START); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qlcnic_83xx_rd_reg_indirect(adapter, + QLCNIC_MS_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + /* Status check failure */ + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_WARNING + "MS memory write failed\n"); + mutex_unlock(&adapter->ahw->mem_lock); + return -EIO; + } + } + + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, + u8 *p_data, int count) +{ + int i, ret; + u32 word, addr = flash_addr; + ulong indirect_addr; + + if (qlcnic_83xx_lock_flash(adapter) != 0) + return -EIO; + + if (addr & 0x3) { + dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + for (i = 0; i < count; i++) { + if (qlcnic_83xx_wrt_reg_indirect(adapter, + QLC_83XX_FLASH_DIRECT_WINDOW, + (addr))) { + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); + ret = qlcnic_83xx_rd_reg_indirect(adapter, + indirect_addr); + if (ret == -EIO) + return -EIO; + word = ret; + *(u32 *)p_data = word; + p_data = p_data + 4; + addr = addr + 4; + } + + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) +{ + int err; + u32 config = 0, state; + struct qlcnic_cmd_args cmd; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func)); + if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) { + dev_info(&adapter->pdev->dev, "link state down\n"); + return config; 
+ } + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_info(&adapter->pdev->dev, + "Get Link Status Command failed: 0x%x\n", err); + goto out; + } else { + config = cmd.rsp.arg[1]; + switch (QLC_83XX_CURRENT_LINK_SPEED(config)) { + case QLC_83XX_10M_LINK: + ahw->link_speed = SPEED_10; + break; + case QLC_83XX_100M_LINK: + ahw->link_speed = SPEED_100; + break; + case QLC_83XX_1G_LINK: + ahw->link_speed = SPEED_1000; + break; + case QLC_83XX_10G_LINK: + ahw->link_speed = SPEED_10000; + break; + default: + ahw->link_speed = 0; + break; + } + config = cmd.rsp.arg[3]; + if (config & 1) + err = 1; + } +out: + qlcnic_free_mbx_args(&cmd); + return config; +} + +int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) +{ + u32 config = 0; + int status = 0; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + /* Get port configuration info */ + status = qlcnic_83xx_get_port_info(adapter); + /* Get Link Status related info */ + config = qlcnic_83xx_test_link(adapter); + ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); + /* hard code until there is a way to get it from flash */ + ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; + return status; +} + +int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ + int status = 0; + u32 config = adapter->ahw->port_config; + + if (ecmd->autoneg) + adapter->ahw->port_config |= BIT_15; + + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_10: + adapter->ahw->port_config |= BIT_8; + break; + case SPEED_100: + adapter->ahw->port_config |= BIT_9; + break; + case SPEED_1000: + adapter->ahw->port_config |= BIT_10; + break; + case SPEED_10000: + adapter->ahw->port_config |= BIT_11; + break; + default: + return -EINVAL; + } + + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + dev_info(&adapter->pdev->dev, + "Failed to Set Link Speed and autoneg.\n"); + adapter->ahw->port_config = config; + } + return status; +} + +static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd, + u64 *data, int index) +{ + u32 low, hi; + u64 val; + + low = cmd->rsp.arg[index]; + hi = cmd->rsp.arg[index + 1]; + val = (((u64) low) | (((u64) hi) << 32)); + *data++ = val; + return data; +} + +static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd, u64 *data, + int type, int *ret) +{ + int err, k, total_regs; + + *ret = 0; + err = qlcnic_issue_cmd(adapter, cmd); + if (err != QLCNIC_RCODE_SUCCESS) { + dev_info(&adapter->pdev->dev, + "Error in get statistics mailbox command\n"); + *ret = -EIO; + return data; + } + total_regs = cmd->rsp.num; + switch (type) { + case QLC_83XX_STAT_MAC: + /* fill in MAC tx counters */ + for (k = 2; k < 28; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 24 bytes of reserved area */ + /* fill in MAC rx counters */ + for (k += 6; k < 60; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 24 bytes of reserved area */ + /* fill in MAC rx frame stats */ + for (k += 6; k < 80; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + break; + case QLC_83XX_STAT_RX: + for (k = 2; k < 8; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 8 bytes of reserved data */ + for (k += 2; k < 24; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 8 bytes containing RE1FBQ error data */ + for (k += 2; k < total_regs; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + break; + case QLC_83XX_STAT_TX: + for (k = 2; k < 10; k += 2) + data = 
qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 8 bytes of reserved data */ + for (k += 2; k < total_regs; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + break; + default: + dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n"); + *ret = -EIO; + } + return data; +} + +void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) +{ + struct qlcnic_cmd_args cmd; + int ret = 0; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); + /* Get Tx stats */ + cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16); + cmd.rsp.num = QLC_83XX_TX_STAT_REGS; + data = qlcnic_83xx_fill_stats(adapter, &cmd, data, + QLC_83XX_STAT_TX, &ret); + if (ret) { + dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); + goto out; + } + /* Get MAC stats */ + cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16); + cmd.rsp.num = QLC_83XX_MAC_STAT_REGS; + memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num); + data = qlcnic_83xx_fill_stats(adapter, &cmd, data, + QLC_83XX_STAT_MAC, &ret); + if (ret) { + dev_info(&adapter->pdev->dev, + "Error getting Rx stats\n"); + goto out; + } + /* Get Rx stats */ + cmd.req.arg[1] = adapter->recv_ctx->context_id << 16; + cmd.rsp.num = QLC_83XX_RX_STAT_REGS; + memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num); + data = qlcnic_83xx_fill_stats(adapter, &cmd, data, + QLC_83XX_STAT_RX, &ret); + if (ret) + dev_info(&adapter->pdev->dev, + "Error getting Tx stats\n"); +out: + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter) +{ + u32 major, minor, sub; + + major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); + + if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) { + dev_info(&adapter->pdev->dev, "%s: Reg test failed\n", + __func__); + return 1; + } + return 0; +} + +int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter) +{ + return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) * + sizeof(adapter->ahw->ext_reg_tbl)) + + (ARRAY_SIZE(qlcnic_83xx_reg_tbl) + + sizeof(adapter->ahw->reg_tbl)); +} + +int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff) +{ + int i, j = 0; + + for (i = QLCNIC_DEV_INFO_SIZE + 1; + j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++) + regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j); + + for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++) + regs_buff[i++] = QLCRDX(adapter->ahw, j); + return i; +} + +int qlcnic_83xx_interrupt_test(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_cmd_args cmd; + u32 data; + u16 intrpt_id, id; + u8 val; + int ret, max_sds_rings = adapter->max_sds_rings; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; + + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); + if (ret) + goto fail_diag_irq; + + ahw->diag_cnt = 0; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[0].id; + else + intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + + cmd.req.arg[1] = 1; + cmd.req.arg[2] = intrpt_id; + cmd.req.arg[3] = BIT_0; + + ret = qlcnic_issue_cmd(adapter, &cmd); + data = cmd.rsp.arg[2]; + id = LSW(data); + val = LSB(MSW(data)); + if (id != intrpt_id) + dev_info(&adapter->pdev->dev, + "Interrupt generated: 0x%x, requested:0x%x\n", + id, intrpt_id); + if (val) + 
dev_err(&adapter->pdev->dev, + "Interrupt test error: 0x%x\n", val); + if (ret) + goto done; + + msleep(20); + ret = !ahw->diag_cnt; + +done: + qlcnic_free_mbx_args(&cmd); + qlcnic_83xx_diag_free_res(netdev, max_sds_rings); + +fail_diag_irq: + adapter->max_sds_rings = max_sds_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int status = 0; + u32 config; + + status = qlcnic_83xx_get_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "%s: Get Pause Config failed\n", __func__); + return; + } + config = ahw->port_config; + if (config & QLC_83XX_CFG_STD_PAUSE) { + if (config & QLC_83XX_CFG_STD_TX_PAUSE) + pause->tx_pause = 1; + if (config & QLC_83XX_CFG_STD_RX_PAUSE) + pause->rx_pause = 1; + } + + if (QLC_83XX_AUTONEG(config)) + pause->autoneg = 1; +} + +int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int status = 0; + u32 config; + + status = qlcnic_83xx_get_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "%s: Get Pause Config failed.\n", __func__); + return status; + } + config = ahw->port_config; + + if (ahw->port_type == QLCNIC_GBE) { + if (pause->autoneg) + ahw->port_config |= QLC_83XX_ENABLE_AUTONEG; + if (!pause->autoneg) + ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG; + } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) { + return -EOPNOTSUPP; + } + + if (!(config & QLC_83XX_CFG_STD_PAUSE)) + ahw->port_config |= QLC_83XX_CFG_STD_PAUSE; + + if (pause->rx_pause && pause->tx_pause) { + ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE; + } else if (pause->rx_pause && !pause->tx_pause) { + ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE; + ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE; + } else if (pause->tx_pause && !pause->rx_pause) { + ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE; + ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE; + } else if (!pause->rx_pause && !pause->tx_pause) { + ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE; + } + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "%s: Set Pause Config failed.\n", __func__); + ahw->port_config = config; + } + return status; +} + +static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter) +{ + int ret; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_OEM_READ_SIG); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_READ_CTRL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) + return -EIO; + + ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA); + return ret & 0xFF; +} + +int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) +{ + int status; + + status = qlcnic_83xx_read_flash_status_reg(adapter); + if (status == -EIO) { + dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n", + __func__); + return 1; + } + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h new file mode 100644 index 000000000000..61f81f6c84a9 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -0,0 +1,438 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#ifndef __QLCNIC_83XX_HW_H +#define __QLCNIC_83XX_HW_H + +#include <linux/types.h> +#include <linux/etherdevice.h> +#include "qlcnic_hw.h" + +/* Directly mapped registers */ +#define QLC_83XX_CRB_WIN_BASE 0x3800 +#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4)) +#define QLC_83XX_SEM_LOCK_BASE 0x3840 +#define QLC_83XX_SEM_UNLOCK_BASE 0x3844 +#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8)) +#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8)) +#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0)) +#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) +#define QLC_83XX_LINK_SPEED_FACTOR 10 +#define QLC_83xx_FUNC_VAL(v, f) ((v) & (1 << (f * 4))) +#define QLC_83XX_INTX_PTR 0x38C0 +#define QLC_83XX_INTX_TRGR 0x38C4 +#define QLC_83XX_INTX_MASK 0x38C8 + +#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100 +#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20 +#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1 +#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2 +#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3 +#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200 +#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3 + +#define QLC_83XX_NO_NIC_RESOURCE 0x5 +#define QLC_83XX_MAC_PRESENT 0xC +#define QLC_83XX_MAC_ABSENT 0xD + + +#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024) + +/* PEG status definitions */ +#define QLC_83XX_CMDPEG_COMPLETE 0xff01 +#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30) +#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31) +#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF) +#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100 +#define QLC_83XX_LEGACY_INTX_DELAY 4 +#define QLC_83XX_REG_DESC 1 +#define QLC_83XX_LRO_DESC 2 +#define QLC_83XX_CTRL_DESC 3 +#define QLC_83XX_FW_CAPABILITY_TSO BIT_6 +#define QLC_83XX_FW_CAP_LRO_MSS BIT_17 +#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0 +#define QLC_83XX_HOST_SDS_MBX_IDX 8 + +#define QLCNIC_HOST_RDS_MBX_IDX 88 +#define QLCNIC_MAX_RING_SETS 8 + +/* Pause control registers */ +#define QLC_83XX_SRE_SHIM_REG 0x0D200284 +#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4 +#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4 +#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388 +#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388 +#define QLC_83XX_PORT0_TC_STATS 0x0B20039C +#define QLC_83XX_PORT1_TC_STATS 0x0B20139C +#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704 +#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704 + +/* Peg PC status registers */ +#define QLC_83XX_CRB_PEG_NET_0 0x3400003c +#define QLC_83XX_CRB_PEG_NET_1 0x3410003c +#define QLC_83XX_CRB_PEG_NET_2 0x3420003c +#define QLC_83XX_CRB_PEG_NET_3 0x3430003c +#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c + +/* Firmware image definitions */ +#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000 +#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin" +#define QLC_83XX_BOOT_FROM_FLASH 0 +#define QLC_83XX_BOOT_FROM_FILE 0x12345678 + +#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 + +struct qlcnic_intrpt_config { + u8 type; + u8 enabled; + u16 id; + u32 src; +}; + +struct qlcnic_macvlan_mbx { + u8 mac[ETH_ALEN]; + u16 vlan; +}; + +struct qlc_83xx_fw_info { + const struct firmware *fw; + u16 major_fw_version; + u8 minor_fw_version; + u8 sub_fw_version; + u8 fw_build_num; + u8 load_from_file; +}; + +struct qlc_83xx_reset { + struct qlc_83xx_reset_hdr *hdr; + int seq_index; + int seq_error; + int array_index; + u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES]; + u8 *buff; + u8 *stop_offset; + u8 *start_offset; + u8 *init_offset; + u8 seq_end; + u8 template_end; +}; + +#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1 +#define 
QLC_83XX_IDC_GRACEFULL_RESET 0x2 +#define QLC_83XX_IDC_TIMESTAMP 0 +#define QLC_83XX_IDC_DURATION 1 +#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30 +#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS 10 +#define QLC_83XX_IDC_RESET_TIMEOUT_SECS 10 +#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20 +#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ) +#define QLC_83XX_IDC_FW_FAIL_THRESH 2 +#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8 +#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16 +#define QLC_83XX_IDC_MAJOR_VERSION 1 +#define QLC_83XX_IDC_MINOR_VERSION 0 +#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020 + +struct qlcnic_adapter; +struct qlc_83xx_idc { + int (*state_entry) (struct qlcnic_adapter *); + u64 sec_counter; + u64 delay; + unsigned long status; + int err_code; + int collect_dump; + u8 curr_state; + u8 prev_state; + u8 vnic_state; + u8 vnic_wait_limit; + u8 quiesce_req; + char **name; +}; + +#define QLCNIC_MBX_RSP(reg) LSW(reg) +#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF) +#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F) +#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4)) +#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4)) + +/* Mailbox process AEN count */ +#define QLC_83XX_IDC_COMP_AEN 3 +#define QLC_83XX_MBX_AEN_CNT 5 +#define QLC_83XX_MODULE_LOADED 1 +#define QLC_83XX_MBX_READY 2 +#define QLC_83XX_MBX_AEN_ACK 3 +#define QLC_83XX_SFP_PRESENT(data) ((data) & 3) +#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3) +#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F) +#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16)) +#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10) +#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11) +#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0) +#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7) +#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3) +#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7) +#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12) +#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13) +#define QLC_83XX_DCBX(data) (((data) >> 28) & 7) +#define QLC_83XX_AUTONEG(data) ((data) & BIT_15) +#define QLC_83XX_CFG_STD_PAUSE (1 << 5) +#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20) +#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20) +#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20) +#define QLC_83XX_ENABLE_AUTONEG (1 << 15) +#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1) +#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1) +#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1) + +/* LED configuration settings */ +#define QLC_83XX_ENABLE_BEACON 0xe +#define QLC_83XX_LED_RATE 0xff +#define QLC_83XX_LED_ACT (1 << 10) +#define QLC_83XX_LED_MOD (0 << 13) +#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \ + QLC_83XX_LED_MOD) + +#define QLC_83XX_10M_LINK 1 +#define QLC_83XX_100M_LINK 2 +#define QLC_83XX_1G_LINK 3 +#define QLC_83XX_10G_LINK 4 +#define QLC_83XX_STAT_TX 3 +#define QLC_83XX_STAT_RX 2 +#define QLC_83XX_STAT_MAC 1 +#define QLC_83XX_TX_STAT_REGS 14 +#define QLC_83XX_RX_STAT_REGS 40 +#define QLC_83XX_MAC_STAT_REGS 80 + +#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2))) +#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2)) +#define QLC_83XX_DEFAULT_OPMODE 0x55555555 +#define QLC_83XX_PRIVLEGED_FUNC 0x1 +#define QLC_83XX_VIRTUAL_FUNC 0x2 + +#define QLC_83XX_LB_MAX_FILTERS 2048 +#define QLC_83XX_LB_BUCKET_SIZE 256 +#define QLC_83XX_MINIMUM_VECTOR 3 + +#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) +#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) +#define 
QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40) +#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40) +#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400) +#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000) +#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) +#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF +#define QLC_83XX_DEFAULT_MODE 0x0 +#define QLCNIC_BRDTYPE_83XX_10G 0x0083 + +#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010 +#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014 +#define QLC_83XX_FLASH_STATUS 0x42100004 +#define QLC_83XX_FLASH_CONTROL 0x42110004 +#define QLC_83XX_FLASH_ADDR 0x42110008 +#define QLC_83XX_FLASH_WRDATA 0x4211000C +#define QLC_83XX_FLASH_RDDATA 0x42110018 +#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030 +#define QLC_83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA)) +#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef +#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda +#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca +#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000 +#define QLC_83XX_FLASH_STATUS_READY 0x6 +#define QLC_83XX_FLASH_BULK_WRITE_MIN 2 +#define QLC_83XX_FLASH_BULK_WRITE_MAX 64 +#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1 +#define QLC_83XX_ERASE_MODE 1 +#define QLC_83XX_WRITE_MODE 2 +#define QLC_83XX_BULK_WRITE_MODE 3 +#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100 +#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300 +#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F +#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8 +#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101 +#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005 +#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000 +#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001 +#define QLC_83XX_FLASH_WRDATA_DEF 0x0 +#define QLC_83XX_FLASH_READ_CTRL 0x3F +#define QLC_83XX_FLASH_SPI_CTRL 0x4 +#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2 +#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5 +#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D +#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43 +#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F +#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D +#define QLC_83xx_FLASH_MAX_WAIT_USEC 100 +#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000 + +/* Additional registers in 83xx */ +enum qlc_83xx_ext_regs { + QLCNIC_GLOBAL_RESET = 0, + QLCNIC_WILDCARD, + QLCNIC_INFORMANT, + QLCNIC_HOST_MBX_CTRL, + QLCNIC_FW_MBX_CTRL, + QLCNIC_BOOTLOADER_ADDR, + QLCNIC_BOOTLOADER_SIZE, + QLCNIC_FW_IMAGE_ADDR, + QLCNIC_MBX_INTR_ENBL, + QLCNIC_DEF_INT_MASK, + QLCNIC_DEF_INT_ID, + QLC_83XX_IDC_MAJ_VERSION, + QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DRV_PRESENCE, + QLC_83XX_IDC_DRV_ACK, + QLC_83XX_IDC_CTRL, + QLC_83XX_IDC_DRV_AUDIT, + QLC_83XX_IDC_MIN_VERSION, + QLC_83XX_RECOVER_DRV_LOCK, + QLC_83XX_IDC_PF_0, + QLC_83XX_IDC_PF_1, + QLC_83XX_IDC_PF_2, + QLC_83XX_IDC_PF_3, + QLC_83XX_IDC_PF_4, + QLC_83XX_IDC_PF_5, + QLC_83XX_IDC_PF_6, + QLC_83XX_IDC_PF_7, + QLC_83XX_IDC_PF_8, + QLC_83XX_IDC_PF_9, + QLC_83XX_IDC_PF_10, + QLC_83XX_IDC_PF_11, + QLC_83XX_IDC_PF_12, + QLC_83XX_IDC_PF_13, + QLC_83XX_IDC_PF_14, + QLC_83XX_IDC_PF_15, + QLC_83XX_IDC_DEV_PARTITION_INFO_1, + QLC_83XX_IDC_DEV_PARTITION_INFO_2, + QLC_83XX_DRV_OP_MODE, + QLC_83XX_VNIC_STATE, + QLC_83XX_DRV_LOCK, + QLC_83XX_DRV_UNLOCK, + QLC_83XX_DRV_LOCK_ID, + QLC_83XX_ASIC_TEMP, +}; + +/* 83xx funcitons */ +int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *); +int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *); +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8); +void qlcnic_83xx_get_func_no(struct qlcnic_adapter *); +int 
qlcnic_83xx_cam_lock(struct qlcnic_adapter *); +void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *); +int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32); +void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *); +void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *); +void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); +void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); +int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong); +int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32); +void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []); +int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); +int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8); +int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8); +int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int); +int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int); +int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *); +void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, __le16); +int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *); +int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); +void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int); + +int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *); +void qlcnic_83xx_napi_del(struct qlcnic_adapter *); +void qlcnic_83xx_napi_enable(struct qlcnic_adapter *); +void qlcnic_83xx_napi_disable(struct qlcnic_adapter *); +int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32); +void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32); +int qlcnic_ind_rd(struct qlcnic_adapter *, u32); +int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *); +int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *, int); +int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); +int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int); +void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *); +int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool); +int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8); +int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *); +void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8, + struct qlcnic_cmd_args *); +int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *, + struct qlcnic_adapter *, u32); +void qlcnic_free_mbx_args(struct qlcnic_cmd_args *); +void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *, + struct qlcnic_info *); +void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *); +irqreturn_t qlcnic_83xx_handle_aen(int, void *); +int qlcnic_83xx_get_port_info(struct qlcnic_adapter *); +void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *); +irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *); +irqreturn_t qlcnic_83xx_intr(int, void *); +irqreturn_t qlcnic_83xx_tmp_intr(int, void *); +void qlcnic_83xx_enable_intr(struct qlcnic_adapter *, + struct qlcnic_host_sds_ring *); +void qlcnic_83xx_disable_intr(struct qlcnic_adapter *, + struct qlcnic_host_sds_ring *); +void qlcnic_83xx_check_vf(struct qlcnic_adapter *, + const struct pci_device_id *); +void qlcnic_83xx_process_aen(struct qlcnic_adapter *); +int qlcnic_83xx_get_port_config(struct qlcnic_adapter *); +int qlcnic_83xx_set_port_config(struct qlcnic_adapter *); +int qlcnic_enable_eswitch(struct qlcnic_adapter *, 
u8, u8); +int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *); +int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *); +int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *); +void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *); +void qlcnic_83xx_register_map(struct qlcnic_hardware_context *); +void qlcnic_83xx_idc_aen_work(struct work_struct *); +void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int); + +int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32); +int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int); +int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *); +int qlcnic_83xx_lock_flash(struct qlcnic_adapter *); +void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *); +int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *); +int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int); +int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *); +int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *); +int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); +int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *, + u32, u8 *, int); +int qlcnic_83xx_init(struct qlcnic_adapter *); +int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *); +int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev); +void qlcnic_83xx_idc_poll_dev_state(struct work_struct *); +int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *); +void qlcnic_83xx_idc_exit(struct qlcnic_adapter *); +void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32); +int qlcnic_83xx_lock_driver(struct qlcnic_adapter *); +void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *); +int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *); +int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32); +int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *); +int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int); +int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int); +int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *); +int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *, + struct qlcnic_info *, u8); +int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); + +void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); +void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); +int qlcnic_83xx_get_settings(struct qlcnic_adapter *); +int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); +void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, + struct ethtool_pauseparam *); +int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, + struct ethtool_pauseparam *); +int qlcnic_83xx_test_link(struct qlcnic_adapter *); +int qlcnic_83xx_reg_test(struct qlcnic_adapter *); +int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); +int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); +int qlcnic_83xx_loopback_test(struct net_device *, u8); +int qlcnic_83xx_interrupt_test(struct net_device *); +int qlcnic_83xx_flash_test(struct qlcnic_adapter *); +#endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c new file mode 100644 index 000000000000..c53832b02b3e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -0,0 +1,2054 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and 
licensing details. + */ + +#include "qlcnic.h" +#include "qlcnic_hw.h" + +/* Reset template definitions */ +#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000 +#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000 +#define QLC_83XX_RESET_SEQ_VERSION 0x0101 + +#define QLC_83XX_OPCODE_NOP 0x0000 +#define QLC_83XX_OPCODE_WRITE_LIST 0x0001 +#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002 +#define QLC_83XX_OPCODE_POLL_LIST 0x0004 +#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008 +#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010 +#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020 +#define QLC_83XX_OPCODE_SEQ_END 0x0040 +#define QLC_83XX_OPCODE_TMPL_END 0x0080 +#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100 + +static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter); +static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); +static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev); +static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); + +/* Template header */ +struct qlc_83xx_reset_hdr { + u16 version; + u16 signature; + u16 size; + u16 entries; + u16 hdr_size; + u16 checksum; + u16 init_offset; + u16 start_offset; +} __packed; + +/* Command entry header. */ +struct qlc_83xx_entry_hdr { + u16 cmd; + u16 size; + u16 count; + u16 delay; +} __packed; + +/* Generic poll command */ +struct qlc_83xx_poll { + u32 mask; + u32 status; +} __packed; + +/* Read modify write command */ +struct qlc_83xx_rmw { + u32 mask; + u32 xor_value; + u32 or_value; + u8 shl; + u8 shr; + u8 index_a; + u8 rsvd; +} __packed; + +/* Generic command with 2 DWORD */ +struct qlc_83xx_entry { + u32 arg1; + u32 arg2; +} __packed; + +/* Generic command with 4 DWORD */ +struct qlc_83xx_quad_entry { + u32 dr_addr; + u32 dr_value; + u32 ar_addr; + u32 ar_value; +} __packed; +static const char *const qlc_83xx_idc_states[] = { + "Unknown", + "Cold", + "Init", + "Ready", + "Need Reset", + "Need Quiesce", + "Failed", + "Quiesce" +}; + +/* Device States */ +enum qlcnic_83xx_states { + QLC_83XX_IDC_DEV_UNKNOWN, + QLC_83XX_IDC_DEV_COLD, + QLC_83XX_IDC_DEV_INIT, + QLC_83XX_IDC_DEV_READY, + QLC_83XX_IDC_DEV_NEED_RESET, + QLC_83XX_IDC_DEV_NEED_QUISCENT, + QLC_83XX_IDC_DEV_FAILED, + QLC_83XX_IDC_DEV_QUISCENT +}; + +static int +qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter) +{ + u32 val; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + if ((val & 0xFFFF)) + return 1; + else + return 0; +} + +static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter) +{ + u32 cur, prev; + cur = adapter->ahw->idc.curr_state; + prev = adapter->ahw->idc.prev_state; + + dev_info(&adapter->pdev->dev, + "current state = %s, prev state = %s\n", + adapter->ahw->idc.name[cur], + adapter->ahw->idc.name[prev]); +} + +static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter, + u8 mode, int lock) +{ + u32 val; + int seconds; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = adapter->portnum & 0xf; + val |= mode << 7; + if (mode) + seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; + else + seconds = jiffies / HZ; + + val |= seconds << 8; + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val); + adapter->ahw->idc.sec_counter = jiffies / HZ; + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter) +{ + u32 val; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION); + val = val & ~(0x3 << (adapter->portnum * 2)); + val = val | 
(QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2)); + QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val); +} + +static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter, + int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); + val = val & ~0xFF; + val = val | QLC_83XX_IDC_MAJOR_VERSION; + QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int +qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter, + int status, int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + + if (status) + val = val | (1 << adapter->portnum); + else + val = val & ~(1 << adapter->portnum); + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); + qlcnic_83xx_idc_update_minor_version(adapter); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter) +{ + u32 val; + u8 version; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); + version = val & 0xFF; + + if (version != QLC_83XX_IDC_MAJOR_VERSION) { + dev_info(&adapter->pdev->dev, + "%s:mismatch. version 0x%x, expected version 0x%x\n", + __func__, version, QLC_83XX_IDC_MAJOR_VERSION); + return -EIO; + } + + return 0; +} + +static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter, + int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0); + /* Clear gracefull reset bit */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val &= ~QLC_83XX_IDC_GRACEFULL_RESET; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter, + int flag, int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); + if (flag) + val = val | (1 << adapter->portnum); + else + val = val & ~(1 << adapter->portnum); + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter, + int time_limit) +{ + u64 seconds; + + seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; + if (seconds <= time_limit) + return 0; + else + return -EBUSY; +} + +/** + * qlcnic_83xx_idc_check_reset_ack_reg + * + * @adapter: adapter structure + * + * Check ACK wait limit and clear the functions which failed to ACK + * + * Return 0 if all functions have acknowledged the reset request. 
+ **/ +static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter) +{ + int timeout; + u32 ack, presence, val; + + timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS; + ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); + presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + dev_info(&adapter->pdev->dev, + "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence); + if (!((ack & presence) == presence)) { + if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) { + /* Clear functions which failed to ACK */ + dev_info(&adapter->pdev->dev, + "%s: ACK wait exceeds time limit\n", __func__); + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + val = val & ~(ack ^ presence); + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); + dev_info(&adapter->pdev->dev, + "%s: updated drv presence reg = 0x%x\n", + __func__, val); + qlcnic_83xx_unlock_driver(adapter); + return 0; + + } else { + return 1; + } + } else { + dev_info(&adapter->pdev->dev, + "%s: Reset ACK received from all functions\n", + __func__); + return 0; + } +} + +/** + * qlcnic_83xx_idc_tx_soft_reset + * + * @adapter: adapter structure + * + * Handle context deletion and recreation request from transmit routine + * + * Returns -EBUSY or Success (0) + * + **/ +static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + qlcnic_down(adapter, netdev); + qlcnic_up(adapter, netdev); + netif_device_attach(netdev); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + dev_err(&adapter->pdev->dev, "%s:\n", __func__); + + adapter->netdev->trans_start = jiffies; + + return 0; +} + +/** + * qlcnic_83xx_idc_detach_driver + * + * @adapter: adapter structure + * Detach net interface, stop TX and cleanup resources before the HW reset. 
+ * Returns: None + * + **/ +static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter) +{ + int i; + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + /* Disable mailbox interrupt */ + QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0); + qlcnic_down(adapter, netdev); + for (i = 0; i < adapter->ahw->num_msix; i++) { + adapter->ahw->intr_tbl[i].id = i; + adapter->ahw->intr_tbl[i].enabled = 0; + adapter->ahw->intr_tbl[i].src = 0; + } +} + +/** + * qlcnic_83xx_idc_attach_driver + * + * @adapter: adapter structure + * + * Re-attach and re-enable net interface + * Returns: None + * + **/ +static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (netif_running(netdev)) { + if (qlcnic_up(adapter, netdev)) + goto done; + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } +done: + netif_device_attach(netdev); + if (netif_running(netdev)) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } +} + +static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + qlcnic_83xx_idc_clear_registers(adapter, 0); + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED); + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + qlcnic_83xx_idc_log_state_history(adapter); + dev_info(&adapter->pdev->dev, "Device will enter failed state\n"); + + return 0; +} + +static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DEV_NEED_QUISCENT); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int +qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DEV_NEED_RESET); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY); + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +/** + * qlcnic_83xx_idc_find_reset_owner_id + * + * @adapter: adapter structure + * + * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE. 
+ * Within the same class, function with lowest PCI ID assumes ownership + * + * Returns: reset owner id or failure indication (-EIO) + * + **/ +static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter) +{ + u32 reg, reg1, reg2, i, j, owner, class; + + reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1); + reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2); + owner = QLCNIC_TYPE_NIC; + i = 0; + j = 0; + reg = reg1; + + do { + class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3); + if (class == owner) + break; + if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) { + reg = reg2; + j = 0; + } else { + j++; + } + + if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) { + if (owner == QLCNIC_TYPE_NIC) + owner = QLCNIC_TYPE_ISCSI; + else if (owner == QLCNIC_TYPE_ISCSI) + owner = QLCNIC_TYPE_FCOE; + else if (owner == QLCNIC_TYPE_FCOE) + return -EIO; + reg = reg1; + j = 0; + i = 0; + } + } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS); + + return i; +} + +static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock) +{ + int ret = 0; + + ret = qlcnic_83xx_restart_hw(adapter); + + if (ret) { + qlcnic_83xx_idc_enter_failed_state(adapter, lock); + } else { + qlcnic_83xx_idc_clear_registers(adapter, lock); + ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock); + } + + return ret; +} + +static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter) +{ + u32 status; + + status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); + + if (status & QLCNIC_RCODE_FATAL_ERROR) { + dev_err(&adapter->pdev->dev, + "peg halt status1=0x%x\n", status); + if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) { + dev_err(&adapter->pdev->dev, + "On board active cooling fan failed. " + "Device has been halted.\n"); + dev_err(&adapter->pdev->dev, + "Replace the adapter.\n"); + return -EIO; + } + } + + return 0; +} + +static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) +{ + /* register for NIC IDC AEN Events */ + qlcnic_83xx_register_nic_idc_func(adapter, 1); + + qlcnic_83xx_enable_mbx_intrpt(adapter); + if ((adapter->flags & QLCNIC_MSIX_ENABLED)) { + if (qlcnic_83xx_config_intrpt(adapter, 1)) { + netdev_err(adapter->netdev, + "Failed to enable mbx intr\n"); + return -EIO; + } + } + + if (qlcnic_83xx_configure_opmode(adapter)) { + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return -EIO; + } + + if (adapter->nic_ops->init_driver(adapter)) { + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return -EIO; + } + + qlcnic_83xx_idc_attach_driver(adapter); + + return 0; +} + +static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) +{ + qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); + set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + adapter->ahw->idc.quiesce_req = 0; + adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + adapter->ahw->idc.err_code = 0; + adapter->ahw->idc.collect_dump = 0; +} + +/** + * qlcnic_83xx_idc_ready_state_entry + * + * @adapter: adapter structure + * + * Perform ready state initialization, this routine will get invoked only + * once from READY state. 
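+ * It appears to be used as the ahw->idc.state_entry() hook invoked from
+ * qlcnic_83xx_idc_ready_state(); a condensed sketch of the entry action
+ * (state names abbreviated):
+ *
+ *	if (prev_state != QLC_83XX_IDC_DEV_READY) {
+ *		qlcnic_83xx_idc_update_idc_params(adapter);
+ *		if (prev_state == NEED_RESET || prev_state == INIT)
+ *			qlcnic_83xx_idc_reattach_driver(adapter);
+ *	}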
+ * + * Returns: Error code or Success(0) + * + **/ +int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) { + qlcnic_83xx_idc_update_idc_params(adapter); + /* Re-attach the device if required */ + if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) || + (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) { + if (qlcnic_83xx_idc_reattach_driver(adapter)) + return -EIO; + } + } + + return 0; +} + +/** + * qlcnic_83xx_idc_vnic_pf_entry + * + * @adapter: adapter structure + * + * Ensure vNIC mode privileged function starts only after vNIC mode is + * enabled by management function. + * If vNIC mode is ready, start initialization. + * + * Returns: -EIO or 0 + * + **/ +int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter) +{ + u32 state; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + /* Privileged function waits till mgmt function enables VNIC mode */ + state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE); + if (state != QLCNIC_DEV_NPAR_OPER) { + if (!ahw->idc.vnic_wait_limit--) { + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return -EIO; + } + dev_info(&adapter->pdev->dev, "vNIC mode disabled\n"); + return -EIO; + + } else { + /* Perform one time initialization from ready state */ + if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) { + qlcnic_83xx_idc_update_idc_params(adapter); + + /* If the previous state is UNKNOWN, device will be + already attached properly by Init routine*/ + if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) { + if (qlcnic_83xx_idc_reattach_driver(adapter)) + return -EIO; + } + adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER; + dev_info(&adapter->pdev->dev, "vNIC mode enabled\n"); + } + } + + return 0; +} + +static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) +{ + adapter->ahw->idc.err_code = -EIO; + dev_err(&adapter->pdev->dev, + "%s: Device in unknown state\n", __func__); + return 0; +} + +/** + * qlcnic_83xx_idc_cold_state + * + * @adapter: adapter structure + * + * If HW is up and running device will enter READY state. + * If firmware image from host needs to be loaded, device is + * forced to start with the file firmware image. + * + * Returns: Error code or Success(0) + * + **/ +static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter) +{ + qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0); + qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0); + + if (qlcnic_load_fw_file) { + qlcnic_83xx_idc_restart_hw(adapter, 0); + } else { + if (qlcnic_83xx_check_hw_status(adapter)) { + qlcnic_83xx_idc_enter_failed_state(adapter, 0); + return -EIO; + } else { + qlcnic_83xx_idc_enter_ready_state(adapter, 0); + } + } + return 0; +} + +/** + * qlcnic_83xx_idc_init_state + * + * @adapter: adapter structure + * + * Reset owner will restart the device from this state. + * Device will enter failed state if it remains + * in this state for more than DEV_INIT time limit. 
+ * + * Returns: Error code or Success(0) + * + **/ +static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter) +{ + int timeout, ret = 0; + u32 owner; + + timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS; + if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) { + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (adapter->ahw->pci_func == owner) + ret = qlcnic_83xx_idc_restart_hw(adapter, 1); + } else { + ret = qlcnic_83xx_idc_check_timeout(adapter, timeout); + return ret; + } + + return ret; +} + +/** + * qlcnic_83xx_idc_ready_state + * + * @adapter: adapter structure + * + * Perform IDC protocol specicifed actions after monitoring device state and + * events. + * + * Returns: Error code or Success(0) + * + **/ +static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) +{ + u32 val; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int ret = 0; + + /* Perform NIC configuration based ready state entry actions */ + if (ahw->idc.state_entry(adapter)) + return -EIO; + + if (qlcnic_check_temp(adapter)) { + if (ahw->temp == QLCNIC_TEMP_PANIC) { + qlcnic_83xx_idc_check_fan_failure(adapter); + dev_err(&adapter->pdev->dev, + "Error: device temperature %d above limits\n", + adapter->ahw->temp); + clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + set_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_83xx_idc_detach_driver(adapter); + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return -EIO; + } + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + ret = qlcnic_83xx_check_heartbeat(adapter); + if (ret) { + adapter->flags |= QLCNIC_FW_HANG; + if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { + clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + set_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); + } + return -EIO; + } + + if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) { + /* Move to need reset state and prepare for reset */ + qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); + return ret; + } + + /* Check for soft reset request */ + if (ahw->reset_context && + !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { + qlcnic_83xx_idc_tx_soft_reset(adapter); + return ret; + } + + /* Move to need quiesce state if requested */ + if (adapter->ahw->idc.quiesce_req) { + qlcnic_83xx_idc_enter_need_quiesce(adapter, 1); + qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); + return ret; + } + + return ret; +} + +/** + * qlcnic_83xx_idc_need_reset_state + * + * @adapter: adapter structure + * + * Device will remain in this state until: + * Reset request ACK's are recieved from all the functions + * Wait time exceeds max time limit + * + * Returns: Error code or Success(0) + * + **/ +static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) +{ + int ret = 0; + + if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { + qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); + qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); + set_bit(__QLCNIC_RESETTING, &adapter->state); + clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) + qlcnic_83xx_disable_vnic_mode(adapter, 1); + qlcnic_83xx_idc_detach_driver(adapter); + } + + /* Check ACK from other functions */ + ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter); + if (ret) { + dev_info(&adapter->pdev->dev, + "%s: Waiting for reset ACK\n", __func__); + return 0; + } + + /* Transit to INIT state and restart the HW */ + qlcnic_83xx_idc_enter_init_state(adapter, 1); + 
+ return ret; +} + +static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) +{ + dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__); + return 0; +} + +static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) +{ + dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); + adapter->ahw->idc.err_code = -EIO; + + return 0; +} + +static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) +{ + dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__); + return 0; +} + +static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter, + u32 state) +{ + u32 cur, prev, next; + + cur = adapter->ahw->idc.curr_state; + prev = adapter->ahw->idc.prev_state; + next = state; + + if ((next < QLC_83XX_IDC_DEV_COLD) || + (next > QLC_83XX_IDC_DEV_QUISCENT)) { + dev_err(&adapter->pdev->dev, + "%s: curr %d, prev %d, next state %d is invalid\n", + __func__, cur, prev, state); + return 1; + } + + if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) && + (prev == QLC_83XX_IDC_DEV_UNKNOWN)) { + if ((next != QLC_83XX_IDC_DEV_COLD) && + (next != QLC_83XX_IDC_DEV_READY)) { + dev_err(&adapter->pdev->dev, + "%s: failed, cur %d prev %d next %d\n", + __func__, cur, prev, next); + return 1; + } + } + + if (next == QLC_83XX_IDC_DEV_INIT) { + if ((prev != QLC_83XX_IDC_DEV_INIT) && + (prev != QLC_83XX_IDC_DEV_COLD) && + (prev != QLC_83XX_IDC_DEV_NEED_RESET)) { + dev_err(&adapter->pdev->dev, + "%s: failed, cur %d prev %d next %d\n", + __func__, cur, prev, next); + return 1; + } + } + + return 0; +} + +static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) +{ + if (adapter->fhash.fnum) + qlcnic_prune_lb_filters(adapter); +} + +/** + * qlcnic_83xx_idc_poll_dev_state + * + * @work: kernel work queue structure used to schedule the function + * + * Poll device state periodically and perform state specific + * actions defined by Inter Driver Communication (IDC) protocol. 
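+ *
+ * The work item is effectively a self-rescheduling loop (sketch):
+ *
+ *	state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ *	validate the state, then dispatch the matching state handler;
+ *	if (test_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status))
+ *		qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ *				     ahw->idc.delay);	1 second by default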
+ * + * Returns: None + * + **/ +void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) +{ + struct qlcnic_adapter *adapter; + u32 state; + + adapter = container_of(work, struct qlcnic_adapter, fw_work.work); + state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); + + if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { + qlcnic_83xx_idc_log_state_history(adapter); + adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; + } else { + adapter->ahw->idc.curr_state = state; + } + + switch (adapter->ahw->idc.curr_state) { + case QLC_83XX_IDC_DEV_READY: + qlcnic_83xx_idc_ready_state(adapter); + break; + case QLC_83XX_IDC_DEV_NEED_RESET: + qlcnic_83xx_idc_need_reset_state(adapter); + break; + case QLC_83XX_IDC_DEV_NEED_QUISCENT: + qlcnic_83xx_idc_need_quiesce_state(adapter); + break; + case QLC_83XX_IDC_DEV_FAILED: + qlcnic_83xx_idc_failed_state(adapter); + return; + case QLC_83XX_IDC_DEV_INIT: + qlcnic_83xx_idc_init_state(adapter); + break; + case QLC_83XX_IDC_DEV_QUISCENT: + qlcnic_83xx_idc_quiesce_state(adapter); + break; + default: + qlcnic_83xx_idc_unknown_state(adapter); + return; + } + adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; + qlcnic_83xx_periodic_tasks(adapter); + + /* Re-schedule the function */ + if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) + qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, + adapter->ahw->idc.delay); +} + +static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) +{ + u32 idc_params, val; + + if (qlcnic_83xx_lockless_flash_read32(adapter, + QLC_83XX_IDC_FLASH_PARAM_ADDR, + (u8 *)&idc_params, 1)) { + dev_info(&adapter->pdev->dev, + "%s:failed to get IDC params from flash\n", __func__); + adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; + adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS; + } else { + adapter->dev_init_timeo = idc_params & 0xFFFF; + adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF); + } + + adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; + adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN; + adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + adapter->ahw->idc.err_code = 0; + adapter->ahw->idc.collect_dump = 0; + adapter->ahw->idc.name = (char **)qlc_83xx_idc_states; + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + + /* Check if reset recovery is disabled */ + if (!qlcnic_auto_fw_reset) { + /* Propagate do not reset request to other functions */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + } +} + +static int +qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter) +{ + u32 state, val; + + if (qlcnic_83xx_lock_driver(adapter)) + return -EIO; + + /* Clear driver lock register */ + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0); + if (qlcnic_83xx_idc_update_major_version(adapter, 0)) { + qlcnic_83xx_unlock_driver(adapter); + return -EIO; + } + + state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); + if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { + qlcnic_83xx_unlock_driver(adapter); + return -EIO; + } + + if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) { + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DEV_COLD); + state = QLC_83XX_IDC_DEV_COLD; + } + + adapter->ahw->idc.curr_state = state; + /* First to load function should cold 
boot the device */ + if (state == QLC_83XX_IDC_DEV_COLD) + qlcnic_83xx_idc_cold_state_handler(adapter); + + /* Check if reset recovery is enabled */ + if (qlcnic_auto_fw_reset) { + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + } + + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) +{ + int ret = -EIO; + + qlcnic_83xx_setup_idc_parameters(adapter); + + if (qlcnic_83xx_get_reset_instruction_template(adapter)) + return ret; + + if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) { + if (qlcnic_83xx_idc_first_to_load_function_handler(adapter)) + return -EIO; + } else { + if (qlcnic_83xx_idc_check_major_version(adapter)) + return -EIO; + } + + qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); + + return 0; +} + +void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter) +{ + int id; + u32 val; + + while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + usleep_range(10000, 11000); + + id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + id = id & 0xFF; + + if (id == adapter->portnum) { + dev_err(&adapter->pdev->dev, + "%s: wait for lock recovery.. %d\n", __func__, id); + msleep(20); + id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + id = id & 0xFF; + } + + /* Clear driver presence bit */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + val = val & ~(1 << adapter->portnum); + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); + clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + cancel_delayed_work_sync(&adapter->fw_work); +} + +void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) +{ + u32 val; + + if (qlcnic_83xx_lock_driver(adapter)) { + dev_err(&adapter->pdev->dev, + "%s:failed, please retry\n", __func__); + return; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || + !qlcnic_auto_fw_reset) { + dev_err(&adapter->pdev->dev, + "%s:failed, device in non reset mode\n", __func__); + qlcnic_83xx_unlock_driver(adapter); + return; + } + + if (key == QLCNIC_FORCE_FW_RESET) { + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val = val | QLC_83XX_IDC_GRACEFULL_RESET; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) { + adapter->ahw->idc.collect_dump = 1; + } + + qlcnic_83xx_unlock_driver(adapter); + return; +} + +static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) +{ + u8 *p_cache; + u32 src, size; + u64 dest; + int ret = -EIO; + + src = QLC_83XX_BOOTLOADER_FLASH_ADDR; + dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR); + size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE); + + /* alignment check */ + if (size & 0xF) + size = (size + 16) & ~0xF; + + p_cache = kzalloc(size, GFP_KERNEL); + if (p_cache == NULL) + return -ENOMEM; + + ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, + size / sizeof(u32)); + if (ret) { + kfree(p_cache); + return ret; + } + /* 16 byte write to MS memory */ + ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, + size / 16); + if (ret) { + kfree(p_cache); + return ret; + } + kfree(p_cache); + + return ret; +} + +static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) +{ + u32 dest, *p_cache; + u64 addr; + u8 data[16]; + size_t size; + int i, ret = -EIO; + + dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); + size = 
(adapter->ahw->fw_info.fw->size & ~0xF); + p_cache = (u32 *)adapter->ahw->fw_info.fw->data; + addr = (u64)dest; + + ret = qlcnic_83xx_ms_mem_write128(adapter, addr, + (u32 *)p_cache, size / 16); + if (ret) { + dev_err(&adapter->pdev->dev, "MS memory write failed\n"); + release_firmware(adapter->ahw->fw_info.fw); + adapter->ahw->fw_info.fw = NULL; + return -EIO; + } + + /* alignment check */ + if (adapter->ahw->fw_info.fw->size & 0xF) { + addr = dest + size; + for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++) + data[i] = adapter->ahw->fw_info.fw->data[size + i]; + for (; i < 16; i++) + data[i] = 0; + ret = qlcnic_83xx_ms_mem_write128(adapter, addr, + (u32 *)data, 1); + if (ret) { + dev_err(&adapter->pdev->dev, + "MS memory write failed\n"); + release_firmware(adapter->ahw->fw_info.fw); + adapter->ahw->fw_info.fw = NULL; + return -EIO; + } + } + release_firmware(adapter->ahw->fw_info.fw); + adapter->ahw->fw_info.fw = NULL; + + return 0; +} + +static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) +{ + int i, j; + u32 val = 0, val1 = 0, reg = 0; + + val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG); + dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val); + + for (j = 0; j < 2; j++) { + if (j == 0) { + dev_info(&adapter->pdev->dev, + "Port 0 RxB Pause Threshold Regs[TC7..TC0]:"); + reg = QLC_83XX_PORT0_THRESHOLD; + } else if (j == 1) { + dev_info(&adapter->pdev->dev, + "Port 1 RxB Pause Threshold Regs[TC7..TC0]:"); + reg = QLC_83XX_PORT1_THRESHOLD; + } + for (i = 0; i < 8; i++) { + val = QLCRD32(adapter, reg + (i * 0x4)); + dev_info(&adapter->pdev->dev, "0x%x ", val); + } + dev_info(&adapter->pdev->dev, "\n"); + } + + for (j = 0; j < 2; j++) { + if (j == 0) { + dev_info(&adapter->pdev->dev, + "Port 0 RxB TC Max Cell Registers[4..1]:"); + reg = QLC_83XX_PORT0_TC_MC_REG; + } else if (j == 1) { + dev_info(&adapter->pdev->dev, + "Port 1 RxB TC Max Cell Registers[4..1]:"); + reg = QLC_83XX_PORT1_TC_MC_REG; + } + for (i = 0; i < 4; i++) { + val = QLCRD32(adapter, reg + (i * 0x4)); + dev_info(&adapter->pdev->dev, "0x%x ", val); + } + dev_info(&adapter->pdev->dev, "\n"); + } + + for (j = 0; j < 2; j++) { + if (j == 0) { + dev_info(&adapter->pdev->dev, + "Port 0 RxB Rx TC Stats[TC7..TC0]:"); + reg = QLC_83XX_PORT0_TC_STATS; + } else if (j == 1) { + dev_info(&adapter->pdev->dev, + "Port 1 RxB Rx TC Stats[TC7..TC0]:"); + reg = QLC_83XX_PORT1_TC_STATS; + } + for (i = 7; i >= 0; i--) { + val = QLCRD32(adapter, reg); + val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ + QLCWR32(adapter, reg, (val | (i << 29))); + val = QLCRD32(adapter, reg); + dev_info(&adapter->pdev->dev, "0x%x ", val); + } + dev_info(&adapter->pdev->dev, "\n"); + } + + val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD); + val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD); + dev_info(&adapter->pdev->dev, + "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", + val, val1); +} + + +static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter) +{ + u32 reg = 0, i, j; + + if (qlcnic_83xx_lock_driver(adapter)) { + dev_err(&adapter->pdev->dev, + "%s:failed to acquire driver lock\n", __func__); + return; + } + + qlcnic_83xx_dump_pause_control_regs(adapter); + QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0); + + for (j = 0; j < 2; j++) { + if (j == 0) + reg = QLC_83XX_PORT0_THRESHOLD; + else if (j == 1) + reg = QLC_83XX_PORT1_THRESHOLD; + + for (i = 0; i < 8; i++) + QLCWR32(adapter, reg + (i * 0x4), 0x0); + } + + for (j = 0; j < 2; j++) { + if (j == 0) + reg = QLC_83XX_PORT0_TC_MC_REG; + else if 
(j == 1) + reg = QLC_83XX_PORT1_TC_MC_REG; + + for (i = 0; i < 4; i++) + QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF); + } + + QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0); + QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0); + dev_info(&adapter->pdev->dev, + "Disabled pause frames successfully on all ports\n"); + qlcnic_83xx_unlock_driver(adapter); +} + +static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) +{ + u32 heartbeat, peg_status; + int retries, ret = -EIO; + + retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; + p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev, + QLCNIC_PEG_ALIVE_COUNTER); + + do { + msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); + heartbeat = QLC_SHARED_REG_RD32(p_dev, + QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != p_dev->heartbeat) { + ret = QLCNIC_RCODE_SUCCESS; + break; + } + } while (--retries); + + if (ret) { + dev_err(&p_dev->pdev->dev, "firmware hang detected\n"); + qlcnic_83xx_disable_pause_frames(p_dev); + peg_status = QLC_SHARED_REG_RD32(p_dev, + QLCNIC_PEG_HALT_STATUS1); + dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n" + "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" + "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" + "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" + "PEG_NET_4_PC: 0x%x\n", peg_status, + QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4)); + + if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) + dev_err(&p_dev->pdev->dev, + "Device is being reset err code 0x00006700.\n"); + } + + return ret; +} + +static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev) +{ + int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; + u32 val; + + do { + val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE); + if (val == QLC_83XX_CMDPEG_COMPLETE) + return 0; + msleep(QLCNIC_CMDPEG_CHECK_DELAY); + } while (--retries); + + dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val); + return -EIO; +} + +int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev) +{ + int err; + + err = qlcnic_83xx_check_cmd_peg_status(p_dev); + if (err) + return err; + + err = qlcnic_83xx_check_heartbeat(p_dev); + if (err) + return err; + + return err; +} + +static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr, + int duration, u32 mask, u32 status) +{ + u32 value; + int timeout_error; + u8 retries; + + value = qlcnic_83xx_rd_reg_indirect(p_dev, addr); + retries = duration / 10; + + do { + if ((value & mask) != status) { + timeout_error = 1; + msleep(duration / 10); + value = qlcnic_83xx_rd_reg_indirect(p_dev, addr); + } else { + timeout_error = 0; + break; + } + } while (retries--); + + if (timeout_error) { + p_dev->ahw->reset.seq_error++; + dev_err(&p_dev->pdev->dev, + "%s: Timeout Err, entry_num = %d\n", + __func__, p_dev->ahw->reset.seq_index); + dev_err(&p_dev->pdev->dev, + "0x%08x 0x%08x 0x%08x\n", + value, mask, status); + } + + return timeout_error; +} + +static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev) +{ + u32 sum = 0; + u16 *buff = (u16 *)p_dev->ahw->reset.buff; + int count = p_dev->ahw->reset.hdr->size / sizeof(u16); + + while (count-- > 0) + sum += *buff++; + + while (sum >> 16) + sum = (sum & 0xFFFF) + (sum >> 16); + + if (~sum) { + return 0; + } else { + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); + return -1; + } +} + +int 
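/*
 * Illustrative sketch, not part of the patch: the template validation in
 * qlcnic_83xx_reset_template_checksum() above accumulates 16-bit words and
 * folds the carries back in (an end-around-carry sum, as used by IP-style
 * checksums). A standalone verifier for that kind of sum could look like
 * the following; the function name is hypothetical, and a folded sum of
 * all-ones is taken as "valid", which is the usual convention for this
 * scheme.
 */
#include <stdint.h>
#include <stddef.h>

static int template_sum_is_valid(const uint16_t *words, size_t nwords)
{
	uint32_t sum = 0;

	while (nwords-- > 0)
		sum += *words++;
	while (sum >> 16)		/* fold carries back into the low 16 bits */
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum == 0;	/* valid when the folded sum is 0xFFFF */
}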
qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) +{ + u8 *p_buff; + u32 addr, count; + struct qlcnic_hardware_context *ahw = p_dev->ahw; + + ahw->reset.seq_error = 0; + ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); + if (p_dev->ahw->reset.buff == NULL) + return -ENOMEM; + + p_buff = p_dev->ahw->reset.buff; + addr = QLC_83XX_RESET_TEMPLATE_ADDR; + count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32); + + /* Copy template header from flash */ + if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { + dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); + return -EIO; + } + ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff; + addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size; + p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size; + count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32); + + /* Copy rest of the template */ + if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { + dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); + return -EIO; + } + + if (qlcnic_83xx_reset_template_checksum(p_dev)) + return -EIO; + /* Get Stop, Start and Init command offsets */ + ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset; + ahw->reset.start_offset = ahw->reset.buff + + ahw->reset.hdr->start_offset; + ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size; + return 0; +} + +/* Read Write HW register command */ +static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev, + u32 raddr, u32 waddr) +{ + int value; + + value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr); + qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); +} + +/* Read Modify Write HW register command */ +static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev, + u32 raddr, u32 waddr, + struct qlc_83xx_rmw *p_rmw_hdr) +{ + int value; + + if (p_rmw_hdr->index_a) + value = p_dev->ahw->reset.array[p_rmw_hdr->index_a]; + else + value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr); + + value &= p_rmw_hdr->mask; + value <<= p_rmw_hdr->shl; + value >>= p_rmw_hdr->shr; + value |= p_rmw_hdr->or_value; + value ^= p_rmw_hdr->xor_value; + qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); +} + +/* Write HW register command */ +static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + struct qlc_83xx_entry *entry; + + entry = (struct qlc_83xx_entry *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1, + entry->arg2); + if (p_hdr->delay) + udelay((u32)(p_hdr->delay)); + } +} + +/* Read and Write instruction */ +static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + struct qlc_83xx_entry *entry; + + entry = (struct qlc_83xx_entry *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1, + entry->arg2); + if (p_hdr->delay) + udelay((u32)(p_hdr->delay)); + } +} + +/* Poll HW register command */ +static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + long delay; + struct qlc_83xx_entry *entry; + struct qlc_83xx_poll *poll; + int i; + unsigned long arg1, arg2; + + poll = (struct qlc_83xx_poll *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + entry = (struct qlc_83xx_entry *)((char *)poll + + 
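/*
 * Annotation (not part of the patch): each reset-template entry handled by
 * these helpers starts with a struct qlc_83xx_entry_hdr (opcode, size,
 * count, delay), optionally followed by an opcode-specific header such as
 * struct qlc_83xx_poll or struct qlc_83xx_rmw, and then by 'count'
 * argument entries. The pointer arithmetic in these routines simply steps
 * past each header to reach the argument array, and the interpreter later
 * in this hunk advances to the next entry with entry += p_hdr->size.
 */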
sizeof(struct qlc_83xx_poll)); + delay = (long)p_hdr->delay; + + if (!delay) { + for (i = 0; i < p_hdr->count; i++, entry++) + qlcnic_83xx_poll_reg(p_dev, entry->arg1, + delay, poll->mask, + poll->status); + } else { + for (i = 0; i < p_hdr->count; i++, entry++) { + arg1 = entry->arg1; + arg2 = entry->arg2; + if (delay) { + if (qlcnic_83xx_poll_reg(p_dev, + arg1, delay, + poll->mask, + poll->status)){ + qlcnic_83xx_rd_reg_indirect(p_dev, + arg1); + qlcnic_83xx_rd_reg_indirect(p_dev, + arg2); + } + } + } + } +} + +/* Poll and write HW register command */ +static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + long delay; + struct qlc_83xx_quad_entry *entry; + struct qlc_83xx_poll *poll; + + poll = (struct qlc_83xx_poll *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + entry = (struct qlc_83xx_quad_entry *)((char *)poll + + sizeof(struct qlc_83xx_poll)); + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr, + entry->dr_value); + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr, + entry->ar_value); + if (delay) + qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay, + poll->mask, poll->status); + } +} + +/* Read Modify Write register command */ +static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + struct qlc_83xx_entry *entry; + struct qlc_83xx_rmw *rmw_hdr; + + rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + entry = (struct qlc_83xx_entry *)((char *)rmw_hdr + + sizeof(struct qlc_83xx_rmw)); + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1, + entry->arg2, rmw_hdr); + if (p_hdr->delay) + udelay((u32)(p_hdr->delay)); + } +} + +static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr) +{ + if (p_hdr->delay) + mdelay((u32)((long)p_hdr->delay)); +} + +/* Read and poll register command */ +static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + long delay; + int index, i, j; + struct qlc_83xx_quad_entry *entry; + struct qlc_83xx_poll *poll; + unsigned long addr; + + poll = (struct qlc_83xx_poll *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + entry = (struct qlc_83xx_quad_entry *)((char *)poll + + sizeof(struct qlc_83xx_poll)); + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr, + entry->ar_value); + if (delay) { + if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay, + poll->mask, poll->status)){ + index = p_dev->ahw->reset.array_index; + addr = entry->dr_addr; + j = qlcnic_83xx_rd_reg_indirect(p_dev, addr); + p_dev->ahw->reset.array[index++] = j; + + if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES) + p_dev->ahw->reset.array_index = 1; + } + } + } +} + +static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev) +{ + p_dev->ahw->reset.seq_end = 1; +} + +static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev) +{ + p_dev->ahw->reset.template_end = 1; + if (p_dev->ahw->reset.seq_error == 0) + dev_err(&p_dev->pdev->dev, + "HW restart process completed successfully.\n"); + else + dev_err(&p_dev->pdev->dev, + "HW restart completed with timeout errors.\n"); +} + +/** +* qlcnic_83xx_exec_template_cmd +* +* @p_dev: adapter structure +* @p_buff: Pointer to instruction template +* +* Template provides instructions to stop, restart and 
initialize firmware. +* These instructions are abstracted as a series of read, write and +* poll operations on hardware registers. Register information and operation +* specifics are not exposed to the driver. Driver reads the template from +* flash and executes the instructions located at pre-defined offsets. +* +* Returns: None +* */ +static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, + char *p_buff) +{ + int index, entries; + struct qlc_83xx_entry_hdr *p_hdr; + char *entry = p_buff; + + p_dev->ahw->reset.seq_end = 0; + p_dev->ahw->reset.template_end = 0; + entries = p_dev->ahw->reset.hdr->entries; + index = p_dev->ahw->reset.seq_index; + + for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) { + p_hdr = (struct qlc_83xx_entry_hdr *)entry; + + switch (p_hdr->cmd) { + case QLC_83XX_OPCODE_NOP: + break; + case QLC_83XX_OPCODE_WRITE_LIST: + qlcnic_83xx_write_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_READ_WRITE_LIST: + qlcnic_83xx_read_write_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_POLL_LIST: + qlcnic_83xx_poll_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_POLL_WRITE_LIST: + qlcnic_83xx_poll_write_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_READ_MODIFY_WRITE: + qlcnic_83xx_read_modify_write(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_SEQ_PAUSE: + qlcnic_83xx_pause(p_hdr); + break; + case QLC_83XX_OPCODE_SEQ_END: + qlcnic_83xx_seq_end(p_dev); + break; + case QLC_83XX_OPCODE_TMPL_END: + qlcnic_83xx_template_end(p_dev); + break; + case QLC_83XX_OPCODE_POLL_READ_LIST: + qlcnic_83xx_poll_read_list(p_dev, p_hdr); + break; + default: + dev_err(&p_dev->pdev->dev, + "%s: Unknown opcode 0x%04x in template %d\n", + __func__, p_hdr->cmd, index); + break; + } + entry += p_hdr->size; + } + p_dev->ahw->reset.seq_index = index; +} + +static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) +{ + p_dev->ahw->reset.seq_index = 0; + + qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset); + if (p_dev->ahw->reset.seq_end != 1) + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); +} + +static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev) +{ + qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset); + if (p_dev->ahw->reset.template_end != 1) + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); +} + +static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) +{ + qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset); + if (p_dev->ahw->reset.seq_end != 1) + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); +} + +static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + if (request_firmware(&adapter->ahw->fw_info.fw, + QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) { + dev_err(&adapter->pdev->dev, + "No file FW image, loading flash FW image.\n"); + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FLASH); + } else { + if (qlcnic_83xx_copy_fw_file(adapter)) + return err; + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FILE); + } + + return 0; +} + +static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) +{ + u32 val; + int err = -EIO; + + qlcnic_83xx_stop_hw(adapter); + + /* Collect FW register dump if required */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) + qlcnic_dump_fw(adapter); + qlcnic_83xx_init_hw(adapter); + + if (qlcnic_83xx_copy_bootloader(adapter)) + return err; + /* Boot either flash image or firmware image from host 
file system */ + if (qlcnic_load_fw_file) { + if (qlcnic_83xx_load_fw_image_from_host(adapter)) + return err; + } else { + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FLASH); + } + + qlcnic_83xx_start_hw(adapter); + if (qlcnic_83xx_check_hw_status(adapter)) + return -EIO; + + return 0; +} + +/** +* qlcnic_83xx_config_default_opmode +* +* @adapter: adapter structure +* +* Configure default driver operating mode +* +* Returns: Error code or Success(0) +* */ +int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter) +{ + u32 op_mode; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + qlcnic_get_func_no(adapter); + op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE); + + if (op_mode == QLC_83XX_DEFAULT_OPMODE) { + adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + } else { + return -EIO; + } + + return 0; +} + +int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) +{ + int err; + struct qlcnic_info nic_info; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); + if (err) + return -EIO; + + ahw->physical_port = (u8) nic_info.phys_port; + ahw->switch_mode = nic_info.switch_mode; + ahw->max_tx_ques = nic_info.max_tx_ques; + ahw->max_rx_ques = nic_info.max_rx_ques; + ahw->capabilities = nic_info.capabilities; + ahw->max_mac_filters = nic_info.max_mac_filters; + ahw->max_mtu = nic_info.max_mtu; + + if (ahw->capabilities & BIT_23) + ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; + else + ahw->nic_mode = QLC_83XX_DEFAULT_MODE; + + return ahw->nic_mode; +} + +static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) +{ + int ret; + + ret = qlcnic_83xx_get_nic_configuration(adapter); + if (ret == -EIO) + return -EIO; + + if (ret == QLC_83XX_VIRTUAL_NIC_MODE) { + if (qlcnic_83xx_config_vnic_opmode(adapter)) + return -EIO; + } else if (ret == QLC_83XX_DEFAULT_MODE) { + if (qlcnic_83xx_config_default_opmode(adapter)) + return -EIO; + } + + return 0; +} + +static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (ahw->port_type == QLCNIC_XGBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + + } else if (ahw->port_type == QLCNIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; + } + adapter->num_txd = MAX_CMD_DESCRIPTORS; + adapter->max_rds_rings = MAX_RDS_RINGS; +} + +static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + qlcnic_83xx_get_minidump_template(adapter); + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_buff_descriptors(adapter); + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + dev_info(&adapter->pdev->dev, "HAL Version: %d\n", + adapter->ahw->fw_hal_version); + + return 0; +} + +#define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1)) +static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + u32 presence_mask, 
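/*
 * Annotation (not part of the patch): IS_QLC_83XX_USED() above evaluates
 * true when the bit for this function's port is set in the IDC
 * driver-presence mask or when bit 6 of the audit mask is set. In that
 * case qlcnic_83xx_clear_function_resources() (continued below) issues
 * QLCNIC_CMD_STOP_NIC_FUNC with BIT_31 in arg[1], which the accompanying
 * error message suggests asks the firmware to release the function's
 * stale resources.
 */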
audit_mask; + int status; + + presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); + + if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) { + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); + cmd.req.arg[1] = BIT_31; + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) + dev_err(&adapter->pdev->dev, + "Failed to clean up the function resources\n"); + qlcnic_free_mbx_args(&cmd); + } +} + +int qlcnic_83xx_init(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (qlcnic_83xx_check_hw_status(adapter)) + return -EIO; + + /* Initialize 83xx mailbox spinlock */ + spin_lock_init(&ahw->mbx_lock); + + set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + qlcnic_83xx_clear_function_resources(adapter); + + /* register for NIC IDC AEN Events */ + qlcnic_83xx_register_nic_idc_func(adapter, 1); + + if (!qlcnic_83xx_read_flash_descriptor_table(adapter)) + qlcnic_83xx_read_flash_mfg_id(adapter); + + if (qlcnic_83xx_idc_init(adapter)) + return -EIO; + + /* Configure default, SR-IOV or Virtual NIC mode of operation */ + if (qlcnic_83xx_configure_opmode(adapter)) + return -EIO; + + /* Perform operating mode specific initialization */ + if (adapter->nic_ops->init_driver(adapter)) + return -EIO; + + INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); + + /* Periodically monitor device status */ + qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); + + return adapter->ahw->idc.err_code; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c new file mode 100644 index 000000000000..b0c3de9ede03 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -0,0 +1,225 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic.h" +#include "qlcnic_hw.h" + +int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER); + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER); + ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) +{ + u8 id; + int i, ret = -EBUSY; + u32 data = QLCNIC_MGMT_FUNC; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (qlcnic_83xx_lock_driver(adapter)) + return ret; + + if (qlcnic_config_npars) { + for (i = 0; i < ahw->act_pci_func; i++) { + id = adapter->npars[i].pci_func; + if (id == ahw->pci_func) + continue; + data |= qlcnic_config_npars & + QLC_83XX_SET_FUNC_OPMODE(0x3, id); + } + } else { + data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) | + QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, + ahw->pci_func); + } + QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data); + + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static void +qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (ahw->port_type == QLCNIC_XGBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + + } else if (ahw->port_type == QLCNIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; + } + adapter->num_txd = MAX_CMD_DESCRIPTORS; + adapter->max_rds_rings = MAX_RDS_RINGS; +} + + +/** + * qlcnic_83xx_init_mgmt_vnic + * + * @adapter: adapter structure + * Management virtual NIC sets the operational mode of other vNIC's and + * configures embedded switch (ESWITCH). + * Returns: Success(0) or error code. 
+ * + **/ +static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + qlcnic_83xx_get_minidump_template(adapter); + if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) { + if (qlcnic_init_pci_info(adapter)) + return err; + + if (qlcnic_83xx_set_vnic_opmode(adapter)) + return err; + + if (qlcnic_set_default_offload_settings(adapter)) + return err; + } else { + if (qlcnic_reset_npar_config(adapter)) + return err; + } + + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_vnic_buff_descriptors(adapter); + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + qlcnic_83xx_enable_vnic_mode(adapter, 1); + + dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n", + adapter->ahw->fw_hal_version); + + return 0; +} + +static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + qlcnic_83xx_get_minidump_template(adapter); + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_vnic_buff_descriptors(adapter); + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + dev_info(&adapter->pdev->dev, + "HAL Version: %d, Privileged function\n", + adapter->ahw->fw_hal_version); + return 0; +} + +static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + qlcnic_83xx_get_fw_version(adapter); + if (qlcnic_set_eswitch_port_config(adapter)) + return err; + + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_vnic_buff_descriptors(adapter); + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n", + adapter->ahw->fw_hal_version); + + return 0; +} + +/** + * qlcnic_83xx_vnic_opmode + * + * @adapter: adapter structure + * Identify virtual NIC operational modes. + * + * Returns: Success(0) or error code. 
+ * + **/ +int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) +{ + u32 op_mode, priv_level; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_nic_template *nic_ops = adapter->nic_ops; + + qlcnic_get_func_no(adapter); + op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + + if (op_mode == QLC_83XX_DEFAULT_OPMODE) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, + ahw->pci_func); + + if (priv_level == QLCNIC_NON_PRIV_FUNC) { + ahw->op_mode = QLCNIC_NON_PRIV_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; + } else if (priv_level == QLCNIC_PRIV_FUNC) { + ahw->op_mode = QLCNIC_PRIV_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; + nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; + } else if (priv_level == QLCNIC_MGMT_FUNC) { + ahw->op_mode = QLCNIC_MGMT_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; + } else { + return -EIO; + } + + if (ahw->capabilities & BIT_23) + adapter->flags |= QLCNIC_ESWITCH_ENABLED; + else + adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + + adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; + adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index b14b8f0787ea..a69097c6b84d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -1,12 +1,92 @@ /* * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation + * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. 
*/ #include "qlcnic.h" +static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { + {QLCNIC_CMD_CREATE_RX_CTX, 4, 1}, + {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, + {QLCNIC_CMD_CREATE_TX_CTX, 4, 1}, + {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1}, + {QLCNIC_CMD_INTRPT_TEST, 4, 1}, + {QLCNIC_CMD_SET_MTU, 4, 1}, + {QLCNIC_CMD_READ_PHY, 4, 2}, + {QLCNIC_CMD_WRITE_PHY, 5, 1}, + {QLCNIC_CMD_READ_HW_REG, 4, 1}, + {QLCNIC_CMD_GET_FLOW_CTL, 4, 2}, + {QLCNIC_CMD_SET_FLOW_CTL, 4, 1}, + {QLCNIC_CMD_READ_MAX_MTU, 4, 2}, + {QLCNIC_CMD_READ_MAX_LRO, 4, 2}, + {QLCNIC_CMD_MAC_ADDRESS, 4, 3}, + {QLCNIC_CMD_GET_PCI_INFO, 4, 1}, + {QLCNIC_CMD_GET_NIC_INFO, 4, 1}, + {QLCNIC_CMD_SET_NIC_INFO, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3}, + {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3}, + {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1}, + {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, + {QLCNIC_CMD_GET_MAC_STATS, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, + {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1}, + {QLCNIC_CMD_CONFIG_PORT, 4, 1}, + {QLCNIC_CMD_TEMP_SIZE, 4, 4}, + {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, + {QLCNIC_CMD_SET_DRV_VER, 4, 1}, +}; + +static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) +{ + return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) | + (0xcafe << 16); +} + +/* Allocate mailbox registers */ +int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, + struct qlcnic_adapter *adapter, u32 type) +{ + int i, size; + const struct qlcnic_mailbox_metadata *mbx_tbl; + + mbx_tbl = qlcnic_mbx_tbl; + size = ARRAY_SIZE(qlcnic_mbx_tbl); + for (i = 0; i < size; i++) { + if (type == mbx_tbl[i].cmd) { + mbx->req.num = mbx_tbl[i].in_args; + mbx->rsp.num = mbx_tbl[i].out_args; + mbx->req.arg = kcalloc(mbx->req.num, + sizeof(u32), GFP_ATOMIC); + if (!mbx->req.arg) + return -ENOMEM; + mbx->rsp.arg = kcalloc(mbx->rsp.num, + sizeof(u32), GFP_ATOMIC); + if (!mbx->rsp.arg) { + kfree(mbx->req.arg); + mbx->req.arg = NULL; + return -ENOMEM; + } + memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); + memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); + mbx->req.arg[0] = type; + break; + } + } + return 0; +} + +/* Free up mailbox registers */ +void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd) +{ + kfree(cmd->req.arg); + cmd->req.arg = NULL; + kfree(cmd->rsp.arg); + cmd->rsp.arg = NULL; +} + static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) { int i; @@ -38,194 +118,123 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter) return rsp; } -void -qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) +int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { + int i; u32 rsp; u32 signature; struct pci_dev *pdev = adapter->pdev; struct qlcnic_hardware_context *ahw = adapter->ahw; + const char *fmt; - signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func, - adapter->ahw->fw_hal_version); + signature = qlcnic_get_cmd_signature(ahw); /* Acquire semaphore before accessing CRB */ if (qlcnic_api_lock(adapter)) { - cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT; - return; + cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; + return cmd->rsp.arg[0]; } QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); - QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1); - QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2); - QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3); + for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++) + QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]); QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, - 
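/*
 * Usage sketch, not part of the patch: the table-driven mailbox helpers
 * introduced above replace the old fixed arg1/arg2/arg3 fields with an
 * allocated req.arg[]/rsp.arg[] pair. Callers elsewhere in this patch
 * follow an alloc -> fill req.arg[] -> issue -> read rsp.arg[] -> free
 * pattern; a minimal caller modelled on the SET_MTU path (hypothetical
 * wrapper name, allocation-failure handling trimmed) would read:
 */
static int example_set_mtu(struct qlcnic_adapter *adapter, u32 ctx_id, int mtu)
{
	struct qlcnic_cmd_args cmd;
	int err;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
	cmd.req.arg[1] = ctx_id;		/* receive context to update */
	cmd.req.arg[2] = mtu;			/* new MTU value */
	err = qlcnic_issue_cmd(adapter, &cmd);	/* rsp.arg[0] carries the firmware return code */
	qlcnic_free_mbx_args(&cmd);
	return err;
}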
QLCNIC_CDRP_FORM_CMD(cmd->req.cmd)); - + QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0])); rsp = qlcnic_poll_rsp(adapter); if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { - dev_err(&pdev->dev, "CDRP response timeout.\n"); - cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT; + dev_err(&pdev->dev, "card response timeout.\n"); + cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { - cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); - switch (cmd->rsp.cmd) { + cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1)); + switch (cmd->rsp.arg[0]) { case QLCNIC_RCODE_INVALID_ARGS: - dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n", - cmd->rsp.cmd); + fmt = "CDRP invalid args: [%d]\n"; break; case QLCNIC_RCODE_NOT_SUPPORTED: case QLCNIC_RCODE_NOT_IMPL: - dev_err(&pdev->dev, - "CDRP command not supported: 0x%x.\n", - cmd->rsp.cmd); + fmt = "CDRP command not supported: [%d]\n"; break; case QLCNIC_RCODE_NOT_PERMITTED: - dev_err(&pdev->dev, - "CDRP requested action not permitted: 0x%x.\n", - cmd->rsp.cmd); + fmt = "CDRP requested action not permitted: [%d]\n"; break; case QLCNIC_RCODE_INVALID: - dev_err(&pdev->dev, - "CDRP invalid or unknown cmd received: 0x%x.\n", - cmd->rsp.cmd); + fmt = "CDRP invalid or unknown cmd received: [%d]\n"; break; case QLCNIC_RCODE_TIMEOUT: - dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n", - cmd->rsp.cmd); + fmt = "CDRP command timeout: [%d]\n"; break; default: - dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n", - cmd->rsp.cmd); + fmt = "CDRP command failed: [%d]\n"; + break; } - } else if (rsp == QLCNIC_CDRP_RSP_OK) { - cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS; - if (cmd->rsp.arg2) - cmd->rsp.arg2 = QLCRD32(adapter, - QLCNIC_ARG2_CRB_OFFSET); - if (cmd->rsp.arg3) - cmd->rsp.arg3 = QLCRD32(adapter, - QLCNIC_ARG3_CRB_OFFSET); - } - if (cmd->rsp.arg1) - cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); + dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); + } else if (rsp == QLCNIC_CDRP_RSP_OK) + cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; + + for (i = 1; i < cmd->rsp.num; i++) + cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i)); /* Release semaphore */ qlcnic_api_unlock(adapter); - -} - -static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size) -{ - uint64_t sum = 0; - int count = temp_size / sizeof(uint32_t); - while (count-- > 0) - sum += *temp_buffer++; - while (sum >> 32) - sum = (sum & 0xFFFFFFFF) + (sum >> 32); - return ~sum; + return cmd->rsp.arg[0]; } -int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) { - int err, i; - void *tmp_addr; - u32 temp_size, version, csum, *template; - __le32 *tmp_buf; struct qlcnic_cmd_args cmd; - struct qlcnic_hardware_context *ahw; - struct qlcnic_dump_template_hdr *tmpl_hdr; - dma_addr_t tmp_addr_t = 0; - - ahw = adapter->ahw; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE; - memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd)); - qlcnic_issue_cmd(adapter, &cmd); - if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) { - dev_info(&adapter->pdev->dev, - "Can't get template size %d\n", cmd.rsp.cmd); - err = -EIO; - return err; - } - temp_size = cmd.rsp.arg2; - version = cmd.rsp.arg3; - dev_info(&adapter->pdev->dev, - "minidump template version = 0x%x", version); - if (!temp_size) - return -EIO; + u32 arg1, arg2, arg3; + char drv_string[12]; + int err = 0; - tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, - &tmp_addr_t, GFP_KERNEL); - if (!tmp_addr) { - dev_err(&adapter->pdev->dev, - "Can't get memory for FW dump 
template\n"); - return -ENOMEM; - } - memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR; - cmd.req.arg1 = LSD(tmp_addr_t); - cmd.req.arg2 = MSD(tmp_addr_t); - cmd.req.arg3 = temp_size; - qlcnic_issue_cmd(adapter, &cmd); - - err = cmd.rsp.cmd; - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to get mini dump template header %d\n", err); - err = -EIO; - goto error; - } - ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); - if (!ahw->fw_dump.tmpl_hdr) { - err = -EIO; - goto error; - } - tmp_buf = tmp_addr; - template = (u32 *) ahw->fw_dump.tmpl_hdr; - for (i = 0; i < temp_size/sizeof(u32); i++) - *template++ = __le32_to_cpu(*tmp_buf++); + memset(drv_string, 0, sizeof(drv_string)); + snprintf(drv_string, sizeof(drv_string), "%d"".""%d"".""%d", + _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, + _QLCNIC_LINUX_SUBVERSION); - csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size); - if (csum) { - dev_err(&adapter->pdev->dev, - "Template header checksum validation failed\n"); - err = -EIO; - goto error; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER); + memcpy(&arg1, drv_string, sizeof(u32)); + memcpy(&arg2, drv_string + 4, sizeof(u32)); + memcpy(&arg3, drv_string + 8, sizeof(u32)); + + cmd.req.arg[1] = arg1; + cmd.req.arg[2] = arg2; + cmd.req.arg[3] = arg3; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_info(&adapter->pdev->dev, + "Failed to set driver version in firmware\n"); + return -EIO; } - tmpl_hdr = ahw->fw_dump.tmpl_hdr; - tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; - ahw->fw_dump.enable = 1; -error: - dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); - return err; + return 0; } int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) { + int err = 0; struct qlcnic_cmd_args cmd; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU; - cmd.req.arg1 = recv_ctx->context_id; - cmd.req.arg2 = mtu; - cmd.req.arg3 = 0; - if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { - qlcnic_issue_cmd(adapter, &cmd); - if (cmd.rsp.cmd) { - dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); - return -EIO; - } - } + if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE) + return err; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); + cmd.req.arg[1] = recv_ctx->context_id; + cmd.req.arg[2] = mtu; - return 0; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + return err; } -static int -qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) +int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) { void *addr; struct qlcnic_hostrq_rx_ctx *prq; @@ -242,10 +251,10 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) u64 phys_addr; u8 i, nrds_rings, nsds_rings; + u16 temp_u16; size_t rq_size, rsp_size; u32 cap, reg, val, reg2; int err; - u16 temp; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; @@ -279,11 +288,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) | QLCNIC_CAP0_VALIDOFF); cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); - if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) - cap |= QLCNIC_CAP0_LRO_MSS; - - temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); - prq->valid_field_offset = cpu_to_le16(temp); + temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); + prq->valid_field_offset = 
cpu_to_le16(temp_u16); prq->txrx_sds_binding = nsds_rings - 1; prq->capabilities[0] = cpu_to_le32(cap); @@ -329,20 +335,17 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) } phys_addr = hostrq_phys_addr; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.arg1 = (u32) (phys_addr >> 32); - cmd.req.arg2 = (u32) (phys_addr & 0xffffffff); - cmd.req.arg3 = rq_size; - cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX; - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + cmd.req.arg[1] = MSD(phys_addr); + cmd.req.arg[2] = LSD(phys_addr); + cmd.req.arg[3] = rq_size; + err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to create rx ctx in firmware%d\n", err); goto out_free_rsp; } - prsp_rds = ((struct qlcnic_cardrsp_rds_ring *) &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); @@ -373,6 +376,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, cardrsp_phys_addr); + qlcnic_free_mbx_args(&cmd); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); return err; @@ -381,24 +385,24 @@ out_free_rq: static void qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) { + int err; struct qlcnic_cmd_args cmd; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.arg1 = recv_ctx->context_id; - cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET; - cmd.req.arg3 = 0; - cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX; - qlcnic_issue_cmd(adapter, &cmd); - if (cmd.rsp.cmd) + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); + cmd.req.arg[1] = recv_ctx->context_id; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) dev_err(&adapter->pdev->dev, "Failed to destroy rx ctx in firmware\n"); recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; + qlcnic_free_mbx_args(&cmd); } -static int -qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) +int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring, + int ring) { struct qlcnic_hostrq_tx_ctx *prq; struct qlcnic_hostrq_cds_ring *prq_cds; @@ -410,7 +414,6 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) int err; u64 phys_addr; dma_addr_t rq_phys_addr, rsp_phys_addr; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; /* reset host resources */ tx_ring->producer = 0; @@ -445,9 +448,9 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) prq->host_int_crb_mode = cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); + prq->msi_index = 0; prq->interrupt_ctl = 0; - prq->msi_index = 0; prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); prq_cds = &prq->cds_ring; @@ -456,19 +459,17 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); phys_addr = rq_phys_addr; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.arg1 = (u32)(phys_addr >> 32); - cmd.req.arg2 = ((u32)phys_addr & 0xffffffff); - cmd.req.arg3 = rq_size; - cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX; - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; + + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + cmd.req.arg[1] = MSD(phys_addr); + cmd.req.arg[2] = LSD(phys_addr); + cmd.req.arg[3] = rq_size; + err = qlcnic_issue_cmd(adapter, &cmd); if (err == QLCNIC_RCODE_SUCCESS) { temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; - - 
adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id); + tx_ring->ctx_id = le16_to_cpu(prsp->context_id); } else { dev_err(&adapter->pdev->dev, "Failed to create tx ctx in firmware%d\n", err); @@ -476,76 +477,81 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) } dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, - rsp_phys_addr); + rsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); + qlcnic_free_mbx_args(&cmd); return err; } static void -qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) +qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_args cmd; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.arg1 = adapter->tx_ring->ctx_id; - cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET; - cmd.req.arg3 = 0; - cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX; - qlcnic_issue_cmd(adapter, &cmd); - if (cmd.rsp.cmd) + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); + cmd.req.arg[1] = tx_ring->ctx_id; + if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, "Failed to destroy tx ctx in firmware\n"); + qlcnic_free_mbx_args(&cmd); } int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) { + int err; struct qlcnic_cmd_args cmd; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.arg1 = config; - cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT; - qlcnic_issue_cmd(adapter, &cmd); - - return cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); + cmd.req.arg[1] = config; + err = qlcnic_issue_cmd(adapter, &cmd); + qlcnic_free_mbx_args(&cmd); + return err; } int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) { void *addr; - int err; - int ring; + int err, ring; struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; + __le32 *ptr; struct pci_dev *pdev = adapter->pdev; recv_ctx = adapter->recv_ctx; - tx_ring = adapter->tx_ring; - tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev, - sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); - if (tx_ring->hw_consumer == NULL) { - dev_err(&pdev->dev, "failed to allocate tx consumer\n"); - return -ENOMEM; - } + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), + &tx_ring->hw_cons_phys_addr, + GFP_KERNEL); - /* cmd desc ring */ - addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), - &tx_ring->phys_addr, GFP_KERNEL); + if (ptr == NULL) { + dev_err(&pdev->dev, "failed to allocate tx consumer\n"); + return -ENOMEM; + } + tx_ring->hw_consumer = ptr; + /* cmd desc ring */ + addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), + &tx_ring->phys_addr, + GFP_KERNEL); - if (addr == NULL) { - dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); - err = -ENOMEM; - goto err_out_free; - } + if (addr == NULL) { + dev_err(&pdev->dev, + "failed to allocate tx desc ring\n"); + err = -ENOMEM; + goto err_out_free; + } - tx_ring->desc_head = addr; + tx_ring->desc_head = addr; + } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; @@ -584,36 +590,70 @@ err_out_free: return err; } - -int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter) +int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) { - int err; + int i, err, ring; - if (adapter->flags & QLCNIC_NEED_FLR) { - pci_reset_function(adapter->pdev); - 
adapter->flags &= ~QLCNIC_NEED_FLR; + if (dev->flags & QLCNIC_NEED_FLR) { + pci_reset_function(dev->pdev); + dev->flags &= ~QLCNIC_NEED_FLR; } - err = qlcnic_fw_cmd_create_rx_ctx(adapter); - if (err) - return err; + if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { + if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { + err = qlcnic_83xx_config_intrpt(dev, 1); + if (err) + return err; + } + } - err = qlcnic_fw_cmd_create_tx_ctx(adapter); - if (err) { - qlcnic_fw_cmd_destroy_rx_ctx(adapter); - return err; + err = qlcnic_fw_cmd_create_rx_ctx(dev); + if (err) + goto err_out; + + for (ring = 0; ring < dev->max_drv_tx_rings; ring++) { + err = qlcnic_fw_cmd_create_tx_ctx(dev, + &dev->tx_ring[ring], + ring); + if (err) { + qlcnic_fw_cmd_destroy_rx_ctx(dev); + if (ring == 0) + goto err_out; + + for (i = 0; i < ring; i++) + qlcnic_fw_cmd_destroy_tx_ctx(dev, + &dev->tx_ring[i]); + + goto err_out; + } } - set_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + set_bit(__QLCNIC_FW_ATTACHED, &dev->state); return 0; + +err_out: + if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { + if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + qlcnic_83xx_config_intrpt(dev, 0); + } + return err; } void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) { + int ring; + if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { qlcnic_fw_cmd_destroy_rx_ctx(adapter); - qlcnic_fw_cmd_destroy_tx_ctx(adapter); - + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) + qlcnic_fw_cmd_destroy_tx_ctx(adapter, + &adapter->tx_ring[ring]); + + if (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED)) { + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + qlcnic_83xx_config_intrpt(adapter, 0); + } /* Allow dma queues to drain after context reset */ mdelay(20); } @@ -629,20 +669,23 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) recv_ctx = adapter->recv_ctx; - tx_ring = adapter->tx_ring; - if (tx_ring->hw_consumer != NULL) { - dma_free_coherent(&adapter->pdev->dev, - sizeof(u32), - tx_ring->hw_consumer, - tx_ring->hw_cons_phys_addr); - tx_ring->hw_consumer = NULL; - } + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + if (tx_ring->hw_consumer != NULL) { + dma_free_coherent(&adapter->pdev->dev, sizeof(u32), + tx_ring->hw_consumer, + tx_ring->hw_cons_phys_addr); - if (tx_ring->desc_head != NULL) { - dma_free_coherent(&adapter->pdev->dev, - TX_DESC_RINGSIZE(tx_ring), - tx_ring->desc_head, tx_ring->phys_addr); - tx_ring->desc_head = NULL; + tx_ring->hw_consumer = NULL; + } + + if (tx_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + TX_DESC_RINGSIZE(tx_ring), + tx_ring->desc_head, + tx_ring->phys_addr); + tx_ring->desc_head = NULL; + } } for (ring = 0; ring < adapter->max_rds_rings; ring++) { @@ -671,40 +714,43 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) } -/* Get MAC address of a NIC partition */ -int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) +int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) { - int err; + int err, i; struct qlcnic_cmd_args cmd; + u32 mac_low, mac_high; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.arg1 = adapter->ahw->pci_func | BIT_8; - cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS; - cmd.rsp.arg1 = cmd.rsp.arg2 = 1; - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8; + err = 
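/*
 * Annotation (not part of the patch): qlcnic_fw_create_ctx() above now
 * creates one TX context per driver TX ring. If a TX context fails
 * part-way, it destroys the RX context and every TX context created so
 * far before bailing out, and the err_out path undoes the 83xx MSI-X
 * interrupt configuration that was set up on entry (skipped for the
 * loopback diagnostic test).
 */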
qlcnic_issue_cmd(adapter, &cmd); - if (err == QLCNIC_RCODE_SUCCESS) - qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac); - else { + if (err == QLCNIC_RCODE_SUCCESS) { + mac_low = cmd.rsp.arg[1]; + mac_high = cmd.rsp.arg[2]; + + for (i = 0; i < 2; i++) + mac[i] = (u8) (mac_high >> ((1 - i) * 8)); + for (i = 2; i < 6; i++) + mac[i] = (u8) (mac_low >> ((5 - i) * 8)); + } else { dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n", err); err = -EIO; } - + qlcnic_free_mbx_args(&cmd); return err; } /* Get info of a NIC partition */ -int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, - struct qlcnic_info *npar_info, u8 func_id) +int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, u8 func_id) { int err; dma_addr_t nic_dma_t; - struct qlcnic_info_le *nic_info; + const struct qlcnic_info_le *nic_info; void *nic_info_addr; struct qlcnic_cmd_args cmd; - size_t nic_size = sizeof(struct qlcnic_info_le); + size_t nic_size = sizeof(struct qlcnic_info_le); nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, &nic_dma_t, GFP_KERNEL); @@ -713,47 +759,39 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, memset(nic_info_addr, 0, nic_size); nic_info = nic_info_addr; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO; - cmd.req.arg1 = MSD(nic_dma_t); - cmd.req.arg2 = LSD(nic_dma_t); - cmd.req.arg3 = (func_id << 16 | nic_size); - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; - if (err == QLCNIC_RCODE_SUCCESS) { + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + cmd.req.arg[1] = MSD(nic_dma_t); + cmd.req.arg[2] = LSD(nic_dma_t); + cmd.req.arg[3] = (func_id << 16 | nic_size); + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to get nic info%d\n", err); + err = -EIO; + } else { npar_info->pci_func = le16_to_cpu(nic_info->pci_func); npar_info->op_mode = le16_to_cpu(nic_info->op_mode); + npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); + npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); npar_info->phys_port = le16_to_cpu(nic_info->phys_port); npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode); npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); - npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); - npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); npar_info->capabilities = le32_to_cpu(nic_info->capabilities); npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); - - dev_info(&adapter->pdev->dev, - "phy port: %d switch_mode: %d,\n" - "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n" - "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n", - npar_info->phys_port, npar_info->switch_mode, - npar_info->max_tx_ques, npar_info->max_rx_ques, - npar_info->min_tx_bw, npar_info->max_tx_bw, - npar_info->max_mtu, npar_info->capabilities); - } else { - dev_err(&adapter->pdev->dev, - "Failed to get nic info%d\n", err); - err = -EIO; } dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, - nic_dma_t); + nic_dma_t); + qlcnic_free_mbx_args(&cmd); + return err; } /* Configure a NIC partition */ -int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) +int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *nic) { int err = -EIO; dma_addr_t nic_dma_t; @@ -784,13 +822,11 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) nic_info->min_tx_bw = 
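/*
 * Illustrative sketch, not part of the patch: qlcnic_82xx_get_mac_address()
 * above rebuilds the six MAC bytes from the two 32-bit response words,
 * taking the two low-order bytes of rsp.arg[2] (mac_high) followed by the
 * four bytes of rsp.arg[1] (mac_low), most-significant byte first. The
 * equivalent unrolled helper (hypothetical name) is:
 */
#include <stdint.h>

static void qlcnic_unpack_mac(uint8_t mac[6], uint32_t mac_high, uint32_t mac_low)
{
	mac[0] = (uint8_t)(mac_high >> 8);
	mac[1] = (uint8_t)(mac_high);
	mac[2] = (uint8_t)(mac_low >> 24);
	mac[3] = (uint8_t)(mac_low >> 16);
	mac[4] = (uint8_t)(mac_low >> 8);
	mac[5] = (uint8_t)(mac_low);
}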
cpu_to_le16(nic->min_tx_bw); nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO; - cmd.req.arg1 = MSD(nic_dma_t); - cmd.req.arg2 = LSD(nic_dma_t); - cmd.req.arg3 = ((nic->pci_func << 16) | nic_size); - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + cmd.req.arg[1] = MSD(nic_dma_t); + cmd.req.arg[2] = LSD(nic_dma_t); + cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size); + err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, @@ -800,12 +836,14 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, nic_dma_t); + qlcnic_free_mbx_args(&cmd); + return err; } /* Get PCI Info of a partition */ -int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, - struct qlcnic_pci_info *pci_info) +int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *pci_info) { int err = 0, i; struct qlcnic_cmd_args cmd; @@ -822,13 +860,11 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, memset(pci_info_addr, 0, pci_size); npar = pci_info_addr; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO; - cmd.req.arg1 = MSD(pci_info_dma_t); - cmd.req.arg2 = LSD(pci_info_dma_t); - cmd.req.arg3 = pci_size; - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + cmd.req.arg[1] = MSD(pci_info_dma_t); + cmd.req.arg[2] = LSD(pci_info_dma_t); + cmd.req.arg[3] = pci_size; + err = qlcnic_issue_cmd(adapter, &cmd); adapter->ahw->act_pci_func = 0; if (err == QLCNIC_RCODE_SUCCESS) { @@ -854,6 +890,8 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, pci_info_dma_t); + qlcnic_free_mbx_args(&cmd); + return err; } @@ -872,21 +910,19 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, arg1 = id | (enable_mirroring ? 
BIT_4 : 0); arg1 |= pci_func << 8; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING; - cmd.req.arg1 = arg1; - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING); + cmd.req.arg[1] = arg1; + err = qlcnic_issue_cmd(adapter, &cmd); - if (err != QLCNIC_RCODE_SUCCESS) { + if (err != QLCNIC_RCODE_SUCCESS) dev_err(&adapter->pdev->dev, "Failed to configure port mirroring%d on eswitch:%d\n", pci_func, id); - } else { + else dev_info(&adapter->pdev->dev, "Configured eSwitch %d for port mirroring:%d\n", id, pci_func); - } + qlcnic_free_mbx_args(&cmd); return err; } @@ -923,13 +959,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; arg1 |= rx_tx << 15 | stats_size << 16; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS; - cmd.req.arg1 = arg1; - cmd.req.arg2 = MSD(stats_dma_t); - cmd.req.arg3 = LSD(stats_dma_t); - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); + cmd.req.arg[1] = arg1; + cmd.req.arg[2] = MSD(stats_dma_t); + cmd.req.arg[3] = LSD(stats_dma_t); + err = qlcnic_issue_cmd(adapter, &cmd); if (!err) { stats = stats_addr; @@ -949,6 +983,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, stats_dma_t); + qlcnic_free_mbx_args(&cmd); + return err; } @@ -963,6 +999,9 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, void *stats_addr; int err; + if (mac_stats == NULL) + return -ENOMEM; + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, &stats_dma_t, GFP_KERNEL); if (!stats_addr) { @@ -971,15 +1010,11 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, return -ENOMEM; } memset(stats_addr, 0, stats_size); - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS; - cmd.req.arg1 = stats_size << 16; - cmd.req.arg2 = MSD(stats_dma_t); - cmd.req.arg3 = LSD(stats_dma_t); - - qlcnic_issue_cmd(adapter, &cmd); - err = cmd.rsp.cmd; - + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); + cmd.req.arg[1] = stats_size << 16; + cmd.req.arg[2] = MSD(stats_dma_t); + cmd.req.arg[3] = LSD(stats_dma_t); + err = qlcnic_issue_cmd(adapter, &cmd); if (!err) { stats = stats_addr; mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames); @@ -1001,10 +1036,16 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber); mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped); mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error); + } else { + dev_err(&adapter->pdev->dev, + "%s: Get mac stats failed, err=%d.\n", __func__, err); } dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, stats_dma_t); + + qlcnic_free_mbx_args(&cmd); + return err; } @@ -1065,7 +1106,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, const u8 port, const u8 rx_tx) { - + int err; u32 arg1; struct qlcnic_cmd_args cmd; @@ -1088,15 +1129,16 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; arg1 |= BIT_14 | rx_tx << 15; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS; - 
cmd.req.arg1 = arg1; - qlcnic_issue_cmd(adapter, &cmd); - return cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); + cmd.req.arg[1] = arg1; + err = qlcnic_issue_cmd(adapter, &cmd); + qlcnic_free_mbx_args(&cmd); + return err; err_ret: - dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d" - "rx_ctx=%d\n", func_esw, port, rx_tx); + dev_err(&adapter->pdev->dev, + "Invalid args func_esw %d port %d rx_ctx %d\n", + func_esw, port, rx_tx); return -EIO; } @@ -1109,22 +1151,21 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, u8 pci_func; pci_func = (*arg1 >> 8); - cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG; - cmd.req.arg1 = *arg1; - cmd.rsp.arg1 = cmd.rsp.arg2 = 1; - qlcnic_issue_cmd(adapter, &cmd); - *arg1 = cmd.rsp.arg1; - *arg2 = cmd.rsp.arg2; - err = cmd.rsp.cmd; + qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); + cmd.req.arg[1] = *arg1; + err = qlcnic_issue_cmd(adapter, &cmd); + *arg1 = cmd.rsp.arg[1]; + *arg2 = cmd.rsp.arg[2]; + qlcnic_free_mbx_args(&cmd); - if (err == QLCNIC_RCODE_SUCCESS) { + if (err == QLCNIC_RCODE_SUCCESS) dev_info(&adapter->pdev->dev, - "eSwitch port config for pci func %d\n", pci_func); - } else { + "eSwitch port config for pci func %d\n", pci_func); + else dev_err(&adapter->pdev->dev, "Failed to get eswitch port config for pci func %d\n", pci_func); - } return err; } /* Configure eSwitch port @@ -1189,20 +1230,18 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, return err; } - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH; - cmd.req.arg1 = arg1; - cmd.req.arg2 = arg2; - qlcnic_issue_cmd(adapter, &cmd); + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH); + cmd.req.arg[1] = arg1; + cmd.req.arg[2] = arg2; + err = qlcnic_issue_cmd(adapter, &cmd); + qlcnic_free_mbx_args(&cmd); - err = cmd.rsp.cmd; - if (err != QLCNIC_RCODE_SUCCESS) { + if (err != QLCNIC_RCODE_SUCCESS) dev_err(&adapter->pdev->dev, "Failed to configure eswitch pci func %d\n", pci_func); - } else { + else dev_info(&adapter->pdev->dev, - "Configured eSwitch for pci func %d\n", pci_func); - } + "Configured eSwitch for pci func %d\n", pci_func); return err; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 74b98110c5b4..5641f8ec49ab 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1,6 +1,6 @@ /* * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation + * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. 
*/ @@ -22,42 +22,37 @@ struct qlcnic_stats { #define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) #define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) +static const u32 qlcnic_fw_dump_level[] = { + 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff +}; static const struct qlcnic_stats qlcnic_gstrings_stats[] = { - {"xmit_called", - QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)}, - {"xmit_finished", - QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)}, - {"rx_dropped", - QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, - {"tx_dropped", - QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, - {"csummed", - QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, - {"rx_pkts", - QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, - {"lro_pkts", - QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, - {"rx_bytes", - QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, - {"tx_bytes", - QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, - {"lrobytes", - QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, - {"lso_frames", - QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, - {"xmit_on", - QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, - {"xmit_off", - QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, + {"xmit_called", QLC_SIZEOF(stats.xmitcalled), + QLC_OFF(stats.xmitcalled)}, + {"xmit_finished", QLC_SIZEOF(stats.xmitfinished), + QLC_OFF(stats.xmitfinished)}, + {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, + {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, + {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, + {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, + {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, + {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, + {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, + {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, + {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, + {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, + {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), - QLC_OFF(stats.skb_alloc_failure)}, - {"null rxbuf", - QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, + QLC_OFF(stats.skb_alloc_failure)}, + {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), QLC_OFF(stats.rx_dma_map_error)}, {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), QLC_OFF(stats.tx_dma_map_error)}, + {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun), + QLC_OFF(stats.mac_filter_limit_overrun)}, + {"spurious intr", QLC_SIZEOF(stats.spurious_intr), + QLC_OFF(stats.spurious_intr)}, }; @@ -78,7 +73,15 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { "tx numbytes", }; -static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = { +static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = { + "ctx_tx_bytes", + "ctx_tx_pkts", + "ctx_tx_errors", + "ctx_tx_dropped_pkts", + "ctx_tx_num_buffers", +}; + +static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = { "mac_tx_frames", "mac_tx_bytes", "mac_tx_mcast_pkts", @@ -110,35 +113,70 @@ static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = { "mac_rx_length_large", "mac_rx_jabber", "mac_rx_dropped", - "mac_rx_crc_error", + "mac_crc_error", "mac_align_error", }; -#define 
QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) -#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings) -#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) -#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN +#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) +static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { + "ctx_rx_bytes", + "ctx_rx_pkts", + "ctx_lro_pkt_cnt", + "ctx_ip_csum_error", + "ctx_rx_pkts_wo_ctx", + "ctx_rx_pkts_dropped_wo_sts", + "ctx_rx_osized_pkts", + "ctx_rx_pkts_dropped_wo_rds", + "ctx_rx_unexpected_mcast_pkts", + "ctx_invalid_mac_address", + "ctx_rx_rds_ring_prim_attemoted", + "ctx_rx_rds_ring_prim_success", + "ctx_num_lro_flows_added", + "ctx_num_lro_flows_removed", + "ctx_num_lro_flows_active", + "ctx_pkts_dropped_unknown", +}; static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { "Register_Test_on_offline", "Link_Test_on_offline", "Interrupt_Test_offline", "Internal_Loopback_offline", - "External_Loopback_offline" + "EEPROM_Test_offline" }; #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) +static inline int qlcnic_82xx_statistics(void) +{ + return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); +} + +static inline int qlcnic_83xx_statistics(void) +{ + return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + + ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); +} + +static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) +{ + if (qlcnic_82xx_check(adapter)) + return qlcnic_82xx_statistics(); + else if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_statistics(); + else + return -1; +} + #define QLCNIC_RING_REGS_COUNT 20 #define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32)) #define QLCNIC_MAX_EEPROM_LEN 1024 static const u32 diag_registers[] = { - CRB_CMDPEG_STATE, - CRB_RCVPEG_STATE, - CRB_XG_STATE_P3P, - CRB_FW_CAPABILITIES_1, - ISR_INT_STATE_REG, + QLCNIC_CMDPEG_STATE, + QLCNIC_RCVPEG_STATE, + QLCNIC_FW_CAPABILITIES, QLCNIC_CRB_DRV_ACTIVE, QLCNIC_CRB_DEV_STATE, QLCNIC_CRB_DRV_STATE, @@ -148,6 +186,13 @@ static const u32 diag_registers[] = { QLCNIC_PEG_ALIVE_COUNTER, QLCNIC_PEG_HALT_STATUS1, QLCNIC_PEG_HALT_STATUS2, + -1 +}; + + +static const u32 ext_diag_registers[] = { + CRB_XG_STATE_P3P, + ISR_INT_STATE_REG, QLCNIC_CRB_PEG_NET_0+0x3c, QLCNIC_CRB_PEG_NET_1+0x3c, QLCNIC_CRB_PEG_NET_2+0x3c, @@ -156,12 +201,19 @@ static const u32 diag_registers[] = { }; #define QLCNIC_MGMT_API_VERSION 2 -#define QLCNIC_DEV_INFO_SIZE 1 -#define QLCNIC_ETHTOOL_REGS_VER 2 +#define QLCNIC_ETHTOOL_REGS_VER 3 + static int qlcnic_get_regs_len(struct net_device *dev) { - return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN + - QLCNIC_DEV_INFO_SIZE + 1; + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 len; + + if (qlcnic_83xx_check(adapter)) + len = qlcnic_83xx_get_regs_len(adapter); + else + len = sizeof(ext_diag_registers) + sizeof(diag_registers); + + return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1; } static int qlcnic_get_eeprom_len(struct net_device *dev) @@ -174,10 +226,9 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { struct qlcnic_adapter *adapter = netdev_priv(dev); u32 fw_major, fw_minor, fw_build; - - fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); - fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); - fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); + fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = 
QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", fw_major, fw_minor, fw_build); @@ -192,7 +243,10 @@ static int qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 speed, reg; int check_sfp_module = 0; + u16 pcifn = ahw->pci_func; /* read which mode */ if (adapter->ahw->port_type == QLCNIC_GBE) { @@ -213,9 +267,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ecmd->autoneg = adapter->ahw->link_autoneg; } else if (adapter->ahw->port_type == QLCNIC_XGBE) { - u32 val; + u32 val = 0; + if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_get_settings(adapter); + else + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); - val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); if (val == QLCNIC_PORT_MODE_802_3_AP) { ecmd->supported = SUPPORTED_1000baseT_Full; ecmd->advertising = ADVERTISED_1000baseT_Full; @@ -225,6 +282,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) } if (netif_running(dev) && adapter->ahw->has_link_events) { + if (qlcnic_82xx_check(adapter)) { + reg = QLCRD32(adapter, + P3P_LINK_SPEED_REG(pcifn)); + speed = P3P_LINK_SPEED_VAL(pcifn, reg); + ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; + } ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); ecmd->autoneg = adapter->ahw->link_autoneg; ecmd->duplex = adapter->ahw->link_duplex; @@ -294,6 +357,13 @@ skip: ecmd->port = PORT_TP; } break; + case QLCNIC_BRDTYPE_83XX_10G: + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(dev) && ahw->has_link_events; + break; default: dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", adapter->ahw->board_type); @@ -321,16 +391,10 @@ skip: return 0; } -static int -qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int qlcnic_set_port_config(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) { - u32 config = 0; - u32 ret = 0; - struct qlcnic_adapter *adapter = netdev_priv(dev); - - if (adapter->ahw->port_type != QLCNIC_GBE) - return -EOPNOTSUPP; - + u32 ret = 0, config = 0; /* read which mode */ if (ecmd->duplex) config |= 0x1; @@ -358,6 +422,24 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EOPNOTSUPP; else if (ret) return -EIO; + return ret; +} + +static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u32 ret = 0; + struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (adapter->ahw->port_type != QLCNIC_GBE) + return -EOPNOTSUPP; + + if (qlcnic_83xx_check(adapter)) + ret = qlcnic_83xx_set_settings(adapter, ecmd); + else + ret = qlcnic_set_port_config(adapter, ecmd); + + if (!ret) + return ret; adapter->ahw->link_speed = ethtool_cmd_speed(ecmd); adapter->ahw->link_duplex = ecmd->duplex; @@ -370,6 +452,19 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return dev->netdev_ops->ndo_open(dev); } +static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter, + u32 *regs_buff) +{ + int i, j = 0; + + for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) + regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]); + j = 0; + while (ext_diag_registers[j] != -1) + regs_buff[i++] = 
QLCRD32(adapter, ext_diag_registers[j++]); + return i; +} + static void qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -377,17 +472,20 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_sds_ring *sds_ring; u32 *regs_buff = p; - int ring, i = 0, j = 0; + int ring, i = 0; memset(p, 0, qlcnic_get_regs_len(dev)); + regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | (adapter->ahw->revision_id << 16) | (adapter->pdev)->device; regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); regs_buff[1] = QLCNIC_MGMT_API_VERSION; - for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) - regs_buff[i] = QLCRD32(adapter, diag_registers[j]); + if (qlcnic_82xx_check(adapter)) + i = qlcnic_82xx_get_registers(adapter, regs_buff); + else + i = qlcnic_83xx_get_registers(adapter, regs_buff); if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) return; @@ -415,6 +513,10 @@ static u32 qlcnic_test_link(struct net_device *dev) struct qlcnic_adapter *adapter = netdev_priv(dev); u32 val; + if (qlcnic_83xx_check(adapter)) { + val = qlcnic_83xx_test_link(adapter); + return (val & 1) ? 0 : 1; + } val = QLCRD32(adapter, CRB_XG_STATE_P3P); val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); return (val == XG_LINK_UP_P3P) ? 0 : 1; @@ -426,8 +528,10 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, { struct qlcnic_adapter *adapter = netdev_priv(dev); int offset; - int ret; + int ret = -1; + if (qlcnic_83xx_check(adapter)) + return 0; if (eeprom->len == 0) return -EINVAL; @@ -435,8 +539,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, ((adapter->pdev)->device << 16); offset = eeprom->offset; - ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, - eeprom->len); + if (qlcnic_82xx_check(adapter)) + ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, + eeprom->len); if (ret < 0) return ret; @@ -529,11 +634,11 @@ static int qlcnic_set_channels(struct net_device *dev, channel->tx_count != channel->max_tx) return -EINVAL; - err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count); + err = qlcnic_validate_max_rss(channel->max_rx, channel->rx_count); if (err) return err; - err = qlcnic_set_max_rss(adapter, channel->rx_count); + err = qlcnic_set_max_rss(adapter, channel->rx_count, 0); netdev_info(dev, "allocated 0x%x sds rings\n", adapter->max_sds_rings); return err; @@ -547,6 +652,10 @@ qlcnic_get_pauseparam(struct net_device *netdev, int port = adapter->ahw->physical_port; __u32 val; + if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_get_pauseparam(adapter, pause); + return; + } if (adapter->ahw->port_type == QLCNIC_GBE) { if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) return; @@ -592,6 +701,9 @@ qlcnic_set_pauseparam(struct net_device *netdev, int port = adapter->ahw->physical_port; __u32 val; + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_set_pauseparam(adapter, pause); + /* read mode */ if (adapter->ahw->port_type == QLCNIC_GBE) { if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) @@ -606,6 +718,7 @@ qlcnic_set_pauseparam(struct net_device *netdev, QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); + QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); /* set autoneg */ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); switch (port) { @@ -668,6 +781,9 @@ static int qlcnic_reg_test(struct net_device *dev) struct qlcnic_adapter *adapter = netdev_priv(dev); u32 data_read; + if 
(qlcnic_83xx_check(adapter)) + return qlcnic_83xx_reg_test(adapter); + data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); if ((data_read & 0xffff) != adapter->pdev->vendor) return 1; @@ -675,16 +791,30 @@ static int qlcnic_reg_test(struct net_device *dev) return 0; } +static int qlcnic_eeprom_test(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (qlcnic_82xx_check(adapter)) + return 0; + + return qlcnic_83xx_flash_test(adapter); +} + static int qlcnic_get_sset_count(struct net_device *dev, int sset) { + int len; + struct qlcnic_adapter *adapter = netdev_priv(dev); switch (sset) { case ETH_SS_TEST: return QLCNIC_TEST_LEN; case ETH_SS_STATS: - if (adapter->flags & QLCNIC_ESWITCH_ENABLED) - return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; - return QLCNIC_TOTAL_STATS_LEN; + len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; + if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || + qlcnic_83xx_check(adapter)) + return len; + return qlcnic_82xx_statistics(); default: return -EOPNOTSUPP; } @@ -693,35 +823,36 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset) static int qlcnic_irq_test(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - int max_sds_rings = adapter->max_sds_rings; - int ret; + struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; + int ret, max_sds_rings = adapter->max_sds_rings; + + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_interrupt_test(netdev); if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EIO; ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); if (ret) - goto clear_it; + goto clear_diag_irq; - adapter->ahw->diag_cnt = 0; - memset(&cmd, 0, sizeof(cmd)); - cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST; - cmd.req.arg1 = adapter->ahw->pci_func; - qlcnic_issue_cmd(adapter, &cmd); - ret = cmd.rsp.cmd; + ahw->diag_cnt = 0; + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + cmd.req.arg[1] = ahw->pci_func; + ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) goto done; - msleep(10); - - ret = !adapter->ahw->diag_cnt; + usleep_range(1000, 12000); + ret = !ahw->diag_cnt; done: + qlcnic_free_mbx_args(&cmd); qlcnic_diag_free_res(netdev, max_sds_rings); -clear_it: +clear_diag_irq: adapter->max_sds_rings = max_sds_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; @@ -750,7 +881,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]) return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE); } -static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) +int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0]; @@ -761,11 +892,10 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); skb_put(skb, QLCNIC_ILB_PKT_SIZE); - adapter->ahw->diag_cnt = 0; qlcnic_xmit_frame(skb, adapter->netdev); - loop = 0; + do { msleep(1); qlcnic_process_rcv_ring_diag(sds_ring); @@ -776,42 +906,46 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) dev_kfree_skb_any(skb); if (!adapter->ahw->diag_cnt) - QLCDB(adapter, DRV, - "LB Test: packet #%d was not received\n", i + 1); + dev_warn(&adapter->pdev->dev, + "LB Test: packet #%d was not received\n", + i + 1); else cnt++; } if (cnt != i) { - dev_warn(&adapter->pdev->dev, "LB Test 
failed\n"); - if (mode != QLCNIC_ILB_MODE) { + dev_err(&adapter->pdev->dev, + "LB Test: failed, TX[%d], RX[%d]\n", i, cnt); + if (mode != QLCNIC_ILB_MODE) dev_warn(&adapter->pdev->dev, - "WARNING: Please make sure external" - "loopback connector is plugged in\n"); - } + "WARNING: Please check loopback cable\n"); return -1; } return 0; } -static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) +int qlcnic_loopback_test(struct net_device *netdev, u8 mode) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int max_sds_rings = adapter->max_sds_rings; struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_hardware_context *ahw = adapter->ahw; int loop = 0; int ret; - if (!(adapter->ahw->capabilities & - QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { - netdev_info(netdev, "Firmware is not loopback test capable\n"); + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_loopback_test(netdev, mode); + + if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { + dev_info(&adapter->pdev->dev, + "Firmware do not support loopback test\n"); return -EOPNOTSUPP; } - QLCDB(adapter, DRV, "%s loopback test in progress\n", - mode == QLCNIC_ILB_MODE ? "internal" : "external"); - if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { - netdev_warn(netdev, "Loopback test not supported for non " - "privilege function\n"); + dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? "internal" : "external"); + if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + dev_warn(&adapter->pdev->dev, + "Loopback test not supported in nonprivileged mode\n"); return 0; } @@ -823,12 +957,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) goto clear_it; sds_ring = &adapter->recv_ctx->sds_rings[0]; - ret = qlcnic_set_lb_mode(adapter, mode); if (ret) goto free_res; - adapter->ahw->diag_cnt = 0; + ahw->diag_cnt = 0; do { msleep(500); qlcnic_process_rcv_ring_diag(sds_ring); @@ -841,11 +974,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) ret = adapter->ahw->diag_cnt; goto free_res; } - } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state)); + } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state)); ret = qlcnic_do_lb_test(adapter, mode); - qlcnic_clear_lb_mode(adapter); + qlcnic_clear_lb_mode(adapter, mode); free_res: qlcnic_diag_free_res(netdev, max_sds_rings); @@ -878,20 +1011,18 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE); if (data[3]) eth_test->flags |= ETH_TEST_FL_FAILED; - if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { - data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE); - if (data[4]) - eth_test->flags |= ETH_TEST_FL_FAILED; - eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; - } + + data[4] = qlcnic_eeprom_test(dev); + if (data[4]) + eth_test->flags |= ETH_TEST_FL_FAILED; } } static void -qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) +qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct qlcnic_adapter *adapter = netdev_priv(dev); - int index, i, j; + int index, i, num_stats; switch (stringset) { case ETH_SS_TEST: @@ -904,14 +1035,34 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) qlcnic_gstrings_stats[index].stat_string, ETH_GSTRING_LEN); } - for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) { - memcpy(data + index * ETH_GSTRING_LEN, - qlcnic_mac_stats_strings[j], - ETH_GSTRING_LEN); + if (qlcnic_83xx_check(adapter)) { + num_stats = 
ARRAY_SIZE(qlcnic_83xx_tx_stats_strings); + for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_tx_stats_strings[i], + ETH_GSTRING_LEN); + num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_mac_stats_strings[i], + ETH_GSTRING_LEN); + num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); + for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_rx_stats_strings[i], + ETH_GSTRING_LEN); + return; + } else { + num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_mac_stats_strings[i], + ETH_GSTRING_LEN); } if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; - for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { + num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats); + for (i = 0; i < num_stats; index++, i++) { memcpy(data + index * ETH_GSTRING_LEN, qlcnic_device_gstrings_stats[i], ETH_GSTRING_LEN); @@ -920,89 +1071,84 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) } static void -qlcnic_fill_stats(int *index, u64 *data, void *stats, int type) +qlcnic_fill_stats(u64 *data, void *stats, int type) { - int ind = *index; - if (type == QLCNIC_MAC_STATS) { struct qlcnic_mac_statistics *mac_stats = (struct qlcnic_mac_statistics *)stats; - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts); - data[ind++] = - QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts); - data[ind++] = - QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts); - data[ind++] = - QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts); - data[ind++] = - QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts); - data[ind++] = - QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts); - data[ind++] = - QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped); - data[ind++] = 
QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error); - data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error); } else if (type == QLCNIC_ESW_STATS) { struct __qlcnic_esw_statistics *esw_stats = (struct __qlcnic_esw_statistics *)stats; - data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames); - data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames); - data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames); - data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames); - data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors); - data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames); - data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes); + *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->errors); + *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes); } - - *index = ind; } -static void -qlcnic_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 * data) +static void qlcnic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) { struct qlcnic_adapter *adapter = netdev_priv(dev); struct qlcnic_esw_statistics port_stats; struct qlcnic_mac_statistics mac_stats; - int index, ret; - - for (index = 0; index < QLCNIC_STATS_LEN; index++) { - char *p = - (char *)adapter + - 
qlcnic_gstrings_stats[index].stat_offset; - data[index] = - (qlcnic_gstrings_stats[index].sizeof_stat == - sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); + int index, ret, length, size; + char *p; + + memset(data, 0, stats->n_stats * sizeof(u64)); + length = QLCNIC_STATS_LEN; + for (index = 0; index < length; index++) { + p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; + size = qlcnic_gstrings_stats[index].sizeof_stat; + *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p)); } - /* Retrieve MAC statistics from firmware */ - memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); - qlcnic_get_mac_stats(adapter, &mac_stats); - qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS); + if (qlcnic_83xx_check(adapter)) { + if (adapter->ahw->linkup) + qlcnic_83xx_get_stats(adapter, data); + return; + } else { + /* Retrieve MAC statistics from firmware */ + memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); + qlcnic_get_mac_stats(adapter, &mac_stats); + qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); + } if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; @@ -1013,14 +1159,13 @@ qlcnic_get_ethtool_stats(struct net_device *dev, if (ret) return; - qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS); - + qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); if (ret) return; - qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS); + qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS); } static int qlcnic_set_led(struct net_device *dev, @@ -1030,6 +1175,8 @@ static int qlcnic_set_led(struct net_device *dev, int max_sds_rings = adapter->max_sds_rings; int err = -EIO, active = 1; + if (qlcnic_83xx_check(adapter)) + return -EOPNOTSUPP; if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(dev, "LED test not supported for non " "privilege function\n"); @@ -1096,6 +1243,8 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct qlcnic_adapter *adapter = netdev_priv(dev); u32 wol_cfg; + if (qlcnic_83xx_check(adapter)) + return; wol->supported = 0; wol->wolopts = 0; @@ -1114,8 +1263,10 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct qlcnic_adapter *adapter = netdev_priv(dev); u32 wol_cfg; - if (wol->wolopts & ~WAKE_MAGIC) + if (qlcnic_83xx_check(adapter)) return -EOPNOTSUPP; + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); if (!(wol_cfg & (1 << adapter->portnum))) @@ -1307,7 +1458,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) return 0; } netdev_info(netdev, "Forcing a FW dump\n"); - qlcnic_dev_request_reset(adapter); + qlcnic_dev_request_reset(adapter, val->flag); break; case QLCNIC_DISABLE_FW_DUMP: if (fw_dump->enable && fw_dump->tmpl_hdr) { @@ -1327,7 +1478,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) return 0; case QLCNIC_FORCE_FW_RESET: netdev_info(netdev, "Forcing a FW reset\n"); - qlcnic_dev_request_reset(adapter); + qlcnic_dev_request_reset(adapter, val->flag); adapter->flags &= ~QLCNIC_FW_RESET_OWNER; return 0; case QLCNIC_SET_QUIESCENT: @@ -1341,8 +1492,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) netdev_err(netdev, "FW dump not supported\n"); return -ENOTSUPP; } - for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) { - if (val->flag == FW_DUMP_LEVELS[i]) { + for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) { + if 
(val->flag == qlcnic_fw_dump_level[i]) { fw_dump->tmpl_hdr->drv_cap_mask = val->flag; netdev_info(netdev, "Driver mask changed to: 0x%x\n", @@ -1386,10 +1537,3 @@ const struct ethtool_ops qlcnic_ethtool_ops = { .get_dump_data = qlcnic_get_dump_data, .set_dump = qlcnic_set_dump, }; - -const struct ethtool_ops qlcnic_ethtool_failed_ops = { - .get_settings = qlcnic_get_settings, - .get_drvinfo = qlcnic_get_drvinfo, - .set_msglevel = qlcnic_set_msglevel, - .get_msglevel = qlcnic_get_msglevel, -}; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h index 49cc1ac4f057..44197ca1456c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -1,6 +1,6 @@ /* * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation + * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ @@ -11,6 +11,8 @@ #include <linux/kernel.h> #include <linux/types.h> +#include "qlcnic_hw.h" + /* * The basic unit of access when reading/writing control registers. */ @@ -387,9 +389,6 @@ enum { #define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) #define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) -/* Lock IDs for ROM lock */ -#define ROM_LOCK_DRIVER 0x0d417340 - /****************************************************************************** * * Definitions specific to M25P flash @@ -449,13 +448,10 @@ enum { #define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) #define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) -#define QLCNIC_PCI_MN_2M (0) -#define QLCNIC_PCI_MS_2M (0x80000) #define QLCNIC_PCI_OCM0_2M (0x000c0000UL) #define QLCNIC_PCI_CRBSPACE (0x06000000UL) #define QLCNIC_PCI_CAMQM (0x04800000UL) #define QLCNIC_PCI_CAMQM_END (0x04800800UL) -#define QLCNIC_PCI_2MB_SIZE (0x00200000UL) #define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) #define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) @@ -491,7 +487,7 @@ enum { #define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000) - +#define MAX_CTL_CHECK 1000 #define TEST_AGT_CTRL (0x00) #define TA_CTL_START BIT_0 @@ -499,44 +495,6 @@ enum { #define TA_CTL_WRITE BIT_2 #define TA_CTL_BUSY BIT_3 -/* - * Register offsets for MN - */ -#define MIU_TEST_AGT_BASE (0x90) - -#define MIU_TEST_AGT_ADDR_LO (0x04) -#define MIU_TEST_AGT_ADDR_HI (0x08) -#define MIU_TEST_AGT_WRDATA_LO (0x10) -#define MIU_TEST_AGT_WRDATA_HI (0x14) -#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20) -#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24) -#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1))) -#define MIU_TEST_AGT_RDDATA_LO (0x18) -#define MIU_TEST_AGT_RDDATA_HI (0x1c) -#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28) -#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c) -#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1))) - -#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 -#define MIU_TEST_AGT_UPPER_ADDR(off) (0) - -/* - * Register offsets for MS - */ -#define SIU_TEST_AGT_BASE (0x60) - -#define SIU_TEST_AGT_ADDR_LO (0x04) -#define SIU_TEST_AGT_ADDR_HI (0x18) -#define SIU_TEST_AGT_WRDATA_LO (0x08) -#define SIU_TEST_AGT_WRDATA_HI (0x0c) -#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) -#define SIU_TEST_AGT_RDDATA_LO (0x10) -#define SIU_TEST_AGT_RDDATA_HI (0x14) -#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) - -#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 -#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) - /* XG Link status */ #define XG_LINK_UP 0x10 
#define XG_LINK_DOWN 0x20 @@ -556,9 +514,6 @@ enum { #define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000) #define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg)) -#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150)) -#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154)) -#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158)) #define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100)) #define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120)) #define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124)) @@ -568,28 +523,17 @@ enum { #define QLCNIC_REG(X) (NIC_CRB_BASE+(X)) #define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X)) +#define QLCNIC_CDRP_MAX_ARGS 4 +#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4))) + #define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18)) -#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c)) -#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20)) -#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24)) #define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28)) -#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50)) -#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c)) - #define CRB_XG_STATE_P3P (QLCNIC_REG(0x98)) #define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) -#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec)) - -#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4)) - -#define CRB_V2P_0 (QLCNIC_REG(0x290)) -#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) #define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) -#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) #define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c)) -#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) /* * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address @@ -616,11 +560,6 @@ enum { /* Lock IDs for PHY lock */ #define PHY_LOCK_DRIVER 0x44524956 -/* Used for PS PCI Memory access */ -#define PCIX_PS_OP_ADDR_LO (0x10000) -/* via CRB (PS side only) */ -#define PCIX_PS_OP_ADDR_HI (0x10004) - #define PCIX_INT_VECTOR (0x10100) #define PCIX_INT_MASK (0x10104) @@ -682,17 +621,6 @@ enum { #define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c)) #define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14)) -#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0)) -#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8)) -#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac)) -#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138)) -#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) - -#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) -#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) -#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) -#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) -#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c)) #define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) #define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) @@ -711,7 +639,6 @@ enum { #define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ #define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ -#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4))) #define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) #define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) #define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) @@ -744,6 +671,9 @@ enum { #define QLCNIC_HEARTBEAT_PERIOD_MSECS 200 #define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45 +#define QLCNIC_MAX_MC_COUNT 38 +#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5 + #define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) #define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) @@ -766,26 +696,13 @@ struct qlcnic_legacy_intr_set { u32 pci_int_reg; }; -#define QLCNIC_FW_API 0x1b216c 
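
Aside on the CDRP mailbox argument registers touched in the hunks above: the new QLCNIC_CDRP_ARG(i) macro generates the same CRB offsets that the removed QLCNIC_CDRP_CRB_OFFSET and QLCNIC_ARG1/2/3_CRB_OFFSET defines spelled out by hand (0x18, 0x1c, 0x20, 0x24 relative to QLCNIC_REG()), which is what lets call sites move from cmd.req.arg1/arg2/arg3 to the indexed cmd.req.arg[] array without changing which hardware registers are written. A minimal user-space sketch of that offset arithmetic, with NIC_CRB_BASE and the readl()/writel() plumbing deliberately left out so the equivalence can be checked in isolation:

/* Sketch only: check that QLCNIC_CDRP_ARG(i) reproduces the offsets the
 * removed per-argument defines used; index 0 lands on the old command
 * register offset, indices 1..3 on the old ARG1..ARG3 offsets.
 */
#include <assert.h>
#include <stdio.h>

#define QLCNIC_CDRP_MAX_ARGS	4
#define QLCNIC_CDRP_ARG(i)	(0x18 + ((i) * 4))	/* relative to the QLCNIC_REG() base */

int main(void)
{
	static const unsigned int legacy_off[QLCNIC_CDRP_MAX_ARGS] = {
		0x18,	/* QLCNIC_CDRP_CRB_OFFSET */
		0x1c,	/* QLCNIC_ARG1_CRB_OFFSET */
		0x20,	/* QLCNIC_ARG2_CRB_OFFSET */
		0x24,	/* QLCNIC_ARG3_CRB_OFFSET */
	};
	int i;

	for (i = 0; i < QLCNIC_CDRP_MAX_ARGS; i++) {
		assert(QLCNIC_CDRP_ARG(i) == legacy_off[i]);
		printf("arg[%d] -> CRB offset 0x%02x\n", i, QLCNIC_CDRP_ARG(i));
	}
	return 0;
}
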
-#define QLCNIC_DRV_OP_MODE 0x1b2170 #define QLCNIC_MSIX_BASE 0x132110 #define QLCNIC_MAX_PCI_FUNC 8 #define QLCNIC_MAX_VLAN_FILTERS 64 -/* FW dump defines */ -#define MIU_TEST_CTR 0x41000090 -#define MIU_TEST_ADDR_LO 0x41000094 -#define MIU_TEST_ADDR_HI 0x41000098 #define FLASH_ROM_WINDOW 0x42110030 #define FLASH_ROM_DATA 0x42150000 - -static const u32 FW_DUMP_LEVELS[] = { - 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff }; - -static const u32 MIU_TEST_READ_DATA[] = { - 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; - #define QLCNIC_FW_DUMP_REG1 0x00130060 #define QLCNIC_FW_DUMP_REG2 0x001e0000 #define QLCNIC_FLASH_SEM2_LK 0x0013C010 @@ -796,7 +713,8 @@ static const u32 MIU_TEST_READ_DATA[] = { enum { QLCNIC_MGMT_FUNC = 0, QLCNIC_PRIV_FUNC = 1, - QLCNIC_NON_PRIV_FUNC = 2 + QLCNIC_NON_PRIV_FUNC = 2, + QLCNIC_UNKNOWN_FUNC_MODE = 3 }; enum { @@ -1013,6 +931,8 @@ enum { #define QLCNIC_NIU_PROMISC_MODE 1 #define QLCNIC_NIU_ALLMULTI_MODE 2 +#define QLCNIC_PCIE_SEM_TIMEOUT 10000 + struct crb_128M_2M_sub_block_map { unsigned valid; unsigned start_128M; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 7a6d5ebe4e0f..325e11e1ce0f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -1,6 +1,6 @@ /* * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation + * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ @@ -344,21 +344,26 @@ qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); } -static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr) +int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr) { u32 data; if (qlcnic_82xx_check(adapter)) qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data); - else - return -EIO; + else { + data = qlcnic_83xx_rd_reg_indirect(adapter, addr); + if (data == -EIO) + return -EIO; + } return data; } -static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data) +void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data) { if (qlcnic_82xx_check(adapter)) qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data); + else + qlcnic_83xx_wrt_reg_indirect(adapter, addr, data); } static int @@ -417,9 +422,8 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, return 0; } -static int -qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, - __le16 vlan_id, unsigned op) +int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, + __le16 vlan_id, u8 op) { struct qlcnic_nic_req req; struct qlcnic_mac_req *mac_req; @@ -442,7 +446,29 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } -static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) +int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr) +{ + struct list_head *head; + struct qlcnic_mac_list_s *cur; + int err = -EINVAL; + + /* Delete MAC from the existing list */ + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_list_s, list); + if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { + err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + 0, QLCNIC_MAC_DEL); + if (err) + return err; + list_del(&cur->list); + kfree(cur); + return err; + } + } + return err; +} + +int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, 
const u8 *addr) { struct list_head *head; struct qlcnic_mac_list_s *cur; @@ -455,11 +481,9 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) } cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC); - if (cur == NULL) { - dev_err(&adapter->netdev->dev, - "failed to add mac address filter\n"); + if (cur == NULL) return -ENOMEM; - } + memcpy(cur->mac_addr, addr, ETH_ALEN); if (qlcnic_sre_macaddr_change(adapter, @@ -506,17 +530,17 @@ void qlcnic_set_multi(struct net_device *netdev) } send_fw_cmd: - if (mode == VPORT_MISS_MODE_ACCEPT_ALL) { + if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { qlcnic_alloc_lb_filters_mem(adapter); - adapter->mac_learn = 1; + adapter->drv_mac_learn = true; } else { - adapter->mac_learn = 0; + adapter->drv_mac_learn = false; } qlcnic_nic_set_promisc(adapter, mode); } -int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) +int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { struct qlcnic_nic_req req; u64 word; @@ -555,18 +579,20 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) struct hlist_node *tmp_hnode, *n; struct hlist_head *head; int i; + unsigned long time; + u8 cmd; - for (i = 0; i < adapter->fhash.fmax; i++) { + for (i = 0; i < adapter->fhash.fbucket_size; i++) { head = &(adapter->fhash.fhead[i]); - - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) - { - if (jiffies > - (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) { + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : + QLCNIC_MAC_DEL; + time = tmp_fil->ftime; + if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { qlcnic_sre_macaddr_change(adapter, - tmp_fil->faddr, tmp_fil->vlan_id, - tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : - QLCNIC_MAC_DEL); + tmp_fil->faddr, + tmp_fil->vlan_id, + cmd); spin_lock_bh(&adapter->mac_learn_lock); adapter->fhash.fnum--; hlist_del(&tmp_fil->fnode); @@ -575,6 +601,21 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) } } } + for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) { + head = &(adapter->rx_fhash.fhead[i]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) + { + time = tmp_fil->ftime; + if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { + spin_lock_bh(&adapter->rx_mac_learn_lock); + adapter->rx_fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->rx_mac_learn_lock); + kfree(tmp_fil); + } + } + } } void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) @@ -583,14 +624,17 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) struct hlist_node *tmp_hnode, *n; struct hlist_head *head; int i; + u8 cmd; - for (i = 0; i < adapter->fhash.fmax; i++) { + for (i = 0; i < adapter->fhash.fbucket_size; i++) { head = &(adapter->fhash.fhead[i]); - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { - qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, - tmp_fil->vlan_id, tmp_fil->vlan_id ? - QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL); + cmd = tmp_fil->vlan_id ? 
QLCNIC_MAC_VLAN_DEL : + QLCNIC_MAC_DEL; + qlcnic_sre_macaddr_change(adapter, + tmp_fil->faddr, + tmp_fil->vlan_id, + cmd); spin_lock_bh(&adapter->mac_learn_lock); adapter->fhash.fnum--; hlist_del(&tmp_fil->fnode); @@ -620,12 +664,13 @@ static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) return rv; } -int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { if (qlcnic_set_fw_loopback(adapter, mode)) return -EIO; - if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) { + if (qlcnic_nic_set_promisc(adapter, + VPORT_MISS_MODE_ACCEPT_ALL)) { qlcnic_set_fw_loopback(adapter, 0); return -EIO; } @@ -634,11 +679,11 @@ int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) return 0; } -void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter) +int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { - int mode = VPORT_MISS_MODE_DROP; struct net_device *netdev = adapter->netdev; + mode = VPORT_MISS_MODE_DROP; qlcnic_set_fw_loopback(adapter, 0); if (netdev->flags & IFF_PROMISC) @@ -648,12 +693,13 @@ void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter) qlcnic_nic_set_promisc(adapter, mode); msleep(1000); + return 0; } /* * Send the interrupt coalescing parameter set by ethtool to the card. */ -int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) +void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter) { struct qlcnic_nic_req req; int rv; @@ -675,10 +721,14 @@ int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) if (rv != 0) dev_err(&adapter->netdev->dev, "Could not send interrupt coalescing parameters\n"); - return rv; } -int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) +#define QLCNIC_ENABLE_IPV4_LRO 1 +#define QLCNIC_ENABLE_IPV6_LRO 2 +#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8) +#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8) + +int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; @@ -694,7 +744,15 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); - req.words[0] = cpu_to_le64(enable); + word = 0; + if (enable) { + word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK; + if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6) + word |= QLCNIC_ENABLE_IPV6_LRO | + QLCNIC_NO_DEST_IPV6_CHECK; + } + + req.words[0] = cpu_to_le64(word); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) @@ -734,9 +792,12 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) } -#define RSS_HASHTYPE_IP_TCP 0x3 +#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3 +#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10 +#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63) +#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL -int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) +int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; @@ -761,13 +822,19 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) * 7-6: hash_type_ipv6 * 8: enable * 9: use indirection table - * 47-10: reserved - * 63-48: indirection table mask + * 10: type-c rss + * 11: udp rss + * 47-12: reserved + * 62-48: indirection table mask + * 63: feature flag */ - word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | - ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | + word = 
((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) | + ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) | ((u64)(enable & 0x1) << 8) | - ((0x7ULL) << 48); + ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) | + (u64)QLCNIC_ENABLE_TYPE_C_RSS | + (u64)QLCNIC_RSS_FEATURE_FLAG; + req.words[0] = cpu_to_le64(word); for (i = 0; i < 5; i++) req.words[i+1] = cpu_to_le64(key[i]); @@ -779,7 +846,8 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) return rv; } -int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd) +void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter, + __be32 ip, int cmd) { struct qlcnic_nic_req req; struct qlcnic_ipaddr *ipa; @@ -801,23 +869,19 @@ int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd) dev_err(&adapter->netdev->dev, "could not notify %s IP 0x%x reuqest\n", (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip); - - return rv; } -int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable) +int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; int rv; - memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable | (enable << 8)); - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, @@ -882,7 +946,8 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev, { struct qlcnic_adapter *adapter = netdev_priv(netdev); - if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) && + qlcnic_82xx_check(adapter)) { netdev_features_t changed = features ^ netdev->features; features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); } @@ -903,13 +968,15 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features) if (!(changed & NETIF_F_LRO)) return 0; - netdev->features = features ^ NETIF_F_LRO; + netdev->features ^= NETIF_F_LRO; if (qlcnic_config_hw_lro(adapter, hw_lro)) return -EIO; - if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter)) - return -EIO; + if (!hw_lro && qlcnic_82xx_check(adapter)) { + if (qlcnic_send_lro_cleanup(adapter)) + return -EIO; + } return 0; } @@ -981,8 +1048,8 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) return 0; } -int -qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) +int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, + u32 data) { unsigned long flags; int rv; @@ -1013,7 +1080,7 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) return -EIO; } -int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) +int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) { unsigned long flags; int rv; @@ -1042,7 +1109,6 @@ int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) return -1; } - void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw, u32 offset) { @@ -1268,7 +1334,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) return ret; } -int qlcnic_get_board_info(struct qlcnic_adapter *adapter) +int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter) { int offset, board_type, magic; struct pci_dev *pdev = adapter->pdev; @@ -1341,7 +1407,7 @@ qlcnic_wol_supported(struct qlcnic_adapter *adapter) return 0; } -int qlcnic_config_led(struct 
qlcnic_adapter *adapter, u32 state, u32 rate) +int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) { struct qlcnic_nic_req req; int rv; @@ -1353,7 +1419,7 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); - req.words[0] = cpu_to_le64((u64)rate << 32); + req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum); req.words[1] = cpu_to_le64(state); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); @@ -1362,3 +1428,56 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) return rv; } + +void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) +{ + void __iomem *msix_base_addr; + u32 func; + u32 msix_base; + + pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); + msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; + msix_base = readl(msix_base_addr); + func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE; + adapter->ahw->pci_func = func; +} + +void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + u32 data; + u64 qmdata; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = QLCRD32(adapter, offset); + memcpy(buf, &data, size); + } +} + +void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + u32 data; + u64 qmdata; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + memcpy(&qmdata, buf, size); + qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + QLCWR32(adapter, offset, data); + } +} + +int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter) +{ + return qlcnic_pcie_sem_lock(adapter, 5, 0); +} + +void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter) +{ + qlcnic_pcie_sem_unlock(adapter, 5); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h new file mode 100644 index 000000000000..5b8749eda11f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -0,0 +1,194 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#ifndef __QLCNIC_HW_H +#define __QLCNIC_HW_H + +/* Common registers in 83xx and 82xx */ +enum qlcnic_regs { + QLCNIC_PEG_HALT_STATUS1 = 0, + QLCNIC_PEG_HALT_STATUS2, + QLCNIC_PEG_ALIVE_COUNTER, + QLCNIC_FLASH_LOCK_OWNER, + QLCNIC_FW_CAPABILITIES, + QLCNIC_CRB_DRV_ACTIVE, + QLCNIC_CRB_DEV_STATE, + QLCNIC_CRB_DRV_STATE, + QLCNIC_CRB_DRV_SCRATCH, + QLCNIC_CRB_DEV_PARTITION_INFO, + QLCNIC_CRB_DRV_IDC_VER, + QLCNIC_FW_VERSION_MAJOR, + QLCNIC_FW_VERSION_MINOR, + QLCNIC_FW_VERSION_SUB, + QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_FW_IMG_VALID, + QLCNIC_CMDPEG_STATE, + QLCNIC_RCVPEG_STATE, + QLCNIC_ASIC_TEMP, + QLCNIC_FW_API, + QLCNIC_DRV_OP_MODE, + QLCNIC_FLASH_LOCK, + QLCNIC_FLASH_UNLOCK, +}; + +/* Read from an address offset from BAR0, existing registers */ +#define QLC_SHARED_REG_RD32(a, addr) \ + readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr])) + +/* Write to an address offset from BAR0, existing registers */ +#define QLC_SHARED_REG_WR32(a, addr, value) \ + writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr])) + +/* Read from a direct address offset from BAR0, additional registers */ +#define QLCRDX(ahw, addr) \ + readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])) + +/* Write to a direct address offset from BAR0, additional registers */ +#define QLCWRX(ahw, addr, value) \ + writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))) + +#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1 +#define QLCNIC_CMD_CONFIG_INTRPT 0x2 +#define QLCNIC_CMD_CREATE_RX_CTX 0x7 +#define QLCNIC_CMD_DESTROY_RX_CTX 0x8 +#define QLCNIC_CMD_CREATE_TX_CTX 0x9 +#define QLCNIC_CMD_DESTROY_TX_CTX 0xa +#define QLCNIC_CMD_CONFIGURE_LRO 0xC +#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD +#define QLCNIC_CMD_GET_STATISTICS 0xF +#define QLCNIC_CMD_INTRPT_TEST 0x11 +#define QLCNIC_CMD_SET_MTU 0x12 +#define QLCNIC_CMD_READ_PHY 0x13 +#define QLCNIC_CMD_WRITE_PHY 0x14 +#define QLCNIC_CMD_READ_HW_REG 0x15 +#define QLCNIC_CMD_GET_FLOW_CTL 0x16 +#define QLCNIC_CMD_SET_FLOW_CTL 0x17 +#define QLCNIC_CMD_READ_MAX_MTU 0x18 +#define QLCNIC_CMD_READ_MAX_LRO 0x19 +#define QLCNIC_CMD_MAC_ADDRESS 0x1f +#define QLCNIC_CMD_GET_PCI_INFO 0x20 +#define QLCNIC_CMD_GET_NIC_INFO 0x21 +#define QLCNIC_CMD_SET_NIC_INFO 0x22 +#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24 +#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25 +#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26 +#define QLCNIC_CMD_SET_PORTMIRRORING 0x27 +#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28 +#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29 +#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a +#define QLCNIC_CMD_CONFIG_PORT 0x2e +#define QLCNIC_CMD_TEMP_SIZE 0x2f +#define QLCNIC_CMD_GET_TEMP_HDR 0x30 +#define QLCNIC_CMD_GET_MAC_STATS 0x37 +#define QLCNIC_CMD_SET_DRV_VER 0x38 +#define QLCNIC_CMD_CONFIGURE_RSS 0x41 +#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 +#define QLCNIC_CMD_CONFIGURE_LED 0x44 +#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45 +#define QLCNIC_CMD_GET_LINK_EVENT 0x48 +#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49 +#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A +#define QLCNIC_CMD_INIT_NIC_FUNC 0x60 +#define QLCNIC_CMD_STOP_NIC_FUNC 0x61 +#define QLCNIC_CMD_IDC_ACK 0x63 +#define QLCNIC_CMD_SET_PORT_CONFIG 0x66 +#define QLCNIC_CMD_GET_PORT_CONFIG 0x67 +#define QLCNIC_CMD_GET_LINK_STATUS 0x68 +#define QLCNIC_CMD_SET_LED_CONFIG 0x69 +#define QLCNIC_CMD_GET_LED_CONFIG 0x6A +#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B + +#define QLCNIC_INTRPT_INTX 1 +#define QLCNIC_INTRPT_MSIX 3 +#define QLCNIC_INTRPT_ADD 1 +#define QLCNIC_INTRPT_DEL 2 + +#define QLCNIC_GET_CURRENT_MAC 1 +#define QLCNIC_SET_STATION_MAC 2 
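
The QLC_SHARED_REG_RD32()/QLC_SHARED_REG_WR32() macros introduced in this header access BAR0 through a per-chip offset table (ahw->reg_tbl[]) indexed by the enum qlcnic_regs values, so shared code such as qlcnic_get_drvinfo() can reference QLCNIC_FW_VERSION_MAJOR without knowing whether it is driving an 82xx or 83xx part. A minimal user-space sketch of that indirection, assuming placeholder offsets, a reduced enum, and a plain in-memory window in place of the real per-chip tables and readl()/writel() accessors, none of which appear in this hunk:

/* Sketch of the reg_tbl indirection behind QLC_SHARED_REG_RD32()/WR32():
 * callers use symbolic enum indices and the per-chip table supplies the
 * BAR0 byte offset.  Offsets and enum contents here are placeholders.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum qlcnic_regs {
	QLCNIC_PEG_HALT_STATUS1 = 0,
	QLCNIC_PEG_HALT_STATUS2,
	QLCNIC_PEG_ALIVE_COUNTER,
	QLCNIC_CMDPEG_STATE,
	QLCNIC_MAX_REG,			/* table size for this sketch only */
};

struct fake_hw_ctx {
	uint32_t *pci_base0;		/* stands in for the ioremap'd BAR0 */
	const uint32_t *reg_tbl;	/* per-chip offset table, in bytes */
};

/* Same shape as QLC_SHARED_REG_RD32()/WR32(), minus readl()/writel(). */
static uint32_t shared_reg_rd32(struct fake_hw_ctx *ahw, enum qlcnic_regs idx)
{
	return *(uint32_t *)((uint8_t *)ahw->pci_base0 + ahw->reg_tbl[idx]);
}

static void shared_reg_wr32(struct fake_hw_ctx *ahw, enum qlcnic_regs idx,
			    uint32_t val)
{
	*(uint32_t *)((uint8_t *)ahw->pci_base0 + ahw->reg_tbl[idx]) = val;
}

int main(void)
{
	static uint32_t bar0[64];	/* fake 256-byte register window */
	static const uint32_t demo_tbl[QLCNIC_MAX_REG] = {
		[QLCNIC_PEG_HALT_STATUS1]  = 0x10,	/* placeholder offsets */
		[QLCNIC_PEG_HALT_STATUS2]  = 0x14,
		[QLCNIC_PEG_ALIVE_COUNTER] = 0x18,
		[QLCNIC_CMDPEG_STATE]      = 0x20,
	};
	struct fake_hw_ctx ahw = { .pci_base0 = bar0, .reg_tbl = demo_tbl };

	shared_reg_wr32(&ahw, QLCNIC_CMDPEG_STATE, 0x12345678);
	printf("CMDPEG_STATE = 0x%" PRIx32 "\n",
	       shared_reg_rd32(&ahw, QLCNIC_CMDPEG_STATE));
	return 0;
}
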
+#define QLCNIC_GET_DEFAULT_MAC 3 +#define QLCNIC_GET_FAC_DEF_MAC 4 +#define QLCNIC_SET_FAC_DEF_MAC 5 + +#define QLCNIC_MBX_LINK_EVENT 0x8001 +#define QLCNIC_MBX_COMP_EVENT 0x8100 +#define QLCNIC_MBX_REQUEST_EVENT 0x8101 +#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102 +#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130 +#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131 + +struct qlcnic_mailbox_metadata { + u32 cmd; + u32 in_args; + u32 out_args; +}; + +/* Mailbox ownership */ +#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1)) + +#define QLCNIC_SET_OWNER 1 +#define QLCNIC_CLR_OWNER 0 +#define QLCNIC_MBX_TIMEOUT 10000 + +#define QLCNIC_MBX_RSP_OK 1 +#define QLCNIC_MBX_PORT_RSP_OK 0x1a +#define QLCNIC_MBX_ASYNC_EVENT BIT_15 + +struct qlcnic_pci_info; +struct qlcnic_info; +struct qlcnic_cmd_args; +struct ethtool_stats; +struct pci_device_id; +struct qlcnic_host_sds_ring; +struct qlcnic_host_tx_ring; +struct qlcnic_host_tx_ring; +struct qlcnic_hardware_context; +struct qlcnic_adapter; + +int qlcnic_82xx_start_firmware(struct qlcnic_adapter *); +int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong); +int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); +int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int); +int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32); +int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev); +void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, + u64 *uaddr, __le16 vlan_id); +void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter); +int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int); +void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter, + __be32, int); +int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int); +void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); +int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8); +int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8); +void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); +void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); +void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32); +int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8); +irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *); +int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *); +int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *); +int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *tx_ring, int); +int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8); +int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*); +int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); +int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); +int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); +int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *, + struct qlcnic_adapter *, u32); +int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); +int qlcnic_82xx_get_board_info(struct qlcnic_adapter *); +int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32); +void qlcnic_82xx_get_func_no(struct qlcnic_adapter *); +int qlcnic_82xx_api_lock(struct qlcnic_adapter *); +void qlcnic_82xx_api_unlock(struct qlcnic_adapter *); +void qlcnic_82xx_napi_enable(struct qlcnic_adapter *); +void qlcnic_82xx_napi_disable(struct qlcnic_adapter 
*); +void qlcnic_82xx_napi_del(struct qlcnic_adapter *); +#endif /* __QLCNIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index de79cde233de..d28336fc65ab 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -1,15 +1,12 @@ /* * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation + * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ -#include <linux/netdevice.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/if_vlan.h> #include "qlcnic.h" +#include "qlcnic_hw.h" struct crb_addr_pair { u32 addr; @@ -166,13 +163,12 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_tx_ring *tx_ring; int ring; recv_ctx = adapter->recv_ctx; if (recv_ctx->rds_rings == NULL) - goto skip_rds; + return; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; @@ -180,16 +176,6 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) rds_ring->rx_buf_arr = NULL; } kfree(recv_ctx->rds_rings); - -skip_rds: - if (adapter->tx_ring == NULL) - return; - - tx_ring = adapter->tx_ring; - vfree(tx_ring->cmd_buf_arr); - tx_ring->cmd_buf_arr = NULL; - kfree(adapter->tx_ring); - adapter->tx_ring = NULL; } int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) @@ -197,39 +183,16 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_rx_buffer *rx_buf; - int ring, i, size; - - struct qlcnic_cmd_buffer *cmd_buf_arr; - struct net_device *netdev = adapter->netdev; - - size = sizeof(struct qlcnic_host_tx_ring); - tx_ring = kzalloc(size, GFP_KERNEL); - if (tx_ring == NULL) { - dev_err(&netdev->dev, "failed to allocate tx ring struct\n"); - return -ENOMEM; - } - adapter->tx_ring = tx_ring; - - tx_ring->num_desc = adapter->num_txd; - tx_ring->txq = netdev_get_tx_queue(netdev, 0); - - cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); - if (cmd_buf_arr == NULL) { - dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); - goto err_out; - } - tx_ring->cmd_buf_arr = cmd_buf_arr; + int ring, i; recv_ctx = adapter->recv_ctx; - size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); - rds_ring = kzalloc(size, GFP_KERNEL); - if (rds_ring == NULL) { - dev_err(&netdev->dev, "failed to allocate rds ring struct\n"); + rds_ring = kcalloc(adapter->max_rds_rings, + sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL); + if (rds_ring == NULL) goto err_out; - } + recv_ctx->rds_rings = rds_ring; for (ring = 0; ring < adapter->max_rds_rings; ring++) { @@ -255,11 +218,9 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) break; } rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); - if (rds_ring->rx_buf_arr == NULL) { - dev_err(&netdev->dev, "Failed to allocate " - "rx buffer ring %d\n", ring); + if (rds_ring->rx_buf_arr == NULL) goto err_out; - } + INIT_LIST_HEAD(&rds_ring->free_list); /* * Now go through all of them, set reference handles @@ -327,7 +288,6 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) long done = 0; cond_resched(); - while (done == 0) { done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); done &= 2; @@ 
-416,8 +376,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) u32 off; struct pci_dev *pdev = adapter->pdev; - QLCWR32(adapter, CRB_CMDPEG_STATE, 0); - QLCWR32(adapter, CRB_RCVPEG_STATE, 0); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0); + QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0); /* Halt all the indiviual PEGs and other blocks */ /* disable all I2Q */ @@ -482,10 +442,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) } buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); - if (buf == NULL) { - dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n"); + if (buf == NULL) return -ENOMEM; - } for (i = 0; i < n; i++) { if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || @@ -564,8 +522,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); msleep(1); - QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); - QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); + QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); + QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); return 0; } @@ -576,7 +534,7 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; do { - val = QLCRD32(adapter, CRB_CMDPEG_STATE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE); switch (val) { case PHAN_INITIALIZE_COMPLETE: @@ -592,7 +550,8 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) } while (--retries); - QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, + PHAN_INITIALIZE_FAILED); out_err: dev_err(&adapter->pdev->dev, "Command Peg initialization not " @@ -607,7 +566,7 @@ qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; do { - val = QLCRD32(adapter, CRB_RCVPEG_STATE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE); if (val == PHAN_PEG_RCV_INITIALIZED) return 0; @@ -638,7 +597,7 @@ qlcnic_check_fw_status(struct qlcnic_adapter *adapter) if (err) return err; - QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK); return err; } @@ -649,7 +608,7 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { int timeo; u32 val; - val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); val = QLC_DEV_GET_DRV(val, adapter->portnum); if ((val & 0x3) != QLCNIC_TYPE_NIC) { dev_err(&adapter->pdev->dev, @@ -689,11 +648,9 @@ static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, } entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); - flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); - if (flt_entry == NULL) { - dev_warn(&adapter->pdev->dev, "error allocating memory\n"); + flt_entry = vzalloc(entry_size); + if (flt_entry == NULL) return -EIO; - } ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + sizeof(struct qlcnic_flt_header), @@ -1096,11 +1053,13 @@ qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) u32 heartbeat, ret = -EIO; int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; - adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + adapter->heartbeat = QLC_SHARED_REG_RD32(adapter, + QLCNIC_PEG_ALIVE_COUNTER); do { msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); - heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + heartbeat = QLC_SHARED_REG_RD32(adapter, + QLCNIC_PEG_ALIVE_COUNTER); if (heartbeat != 
adapter->heartbeat) { ret = QLCNIC_RCODE_SUCCESS; break; @@ -1270,7 +1229,7 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter) return -EINVAL; } - QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC); return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 09aa310b6194..6387e0cc3ea9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1,3 +1,10 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <net/ip.h> @@ -5,9 +12,6 @@ #include "qlcnic.h" -#define QLCNIC_MAC_HASH(MAC)\ - ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25)) - #define TX_ETHER_PKT 0x01 #define TX_TCP_PKT 0x02 #define TX_UDP_PKT 0x03 @@ -84,6 +88,8 @@ #define qlcnic_get_lro_sts_mss(sts_data1) \ ((sts_data1 >> 32) & 0x0FFFF) +#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff) + /* opcode field in status_desc */ #define QLCNIC_SYN_OFFLOAD 0x03 #define QLCNIC_RXPKT_DESC 0x04 @@ -91,18 +97,152 @@ #define QLCNIC_RESPONSE_DESC 0x05 #define QLCNIC_LRO_DESC 0x12 +#define QLCNIC_TX_POLL_BUDGET 128 +#define QLCNIC_TCP_HDR_SIZE 20 +#define QLCNIC_TCP_TS_OPTION_SIZE 12 +#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) +#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM) + +#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE) + /* for status field in status_desc */ #define STATUS_CKSUM_LOOP 0 #define STATUS_CKSUM_OK 2 -static void qlcnic_change_filter(struct qlcnic_adapter *adapter, - u64 uaddr, __le16 vlan_id, - struct qlcnic_host_tx_ring *tx_ring) +#define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF) +#define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF) +#define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7) +#define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF) +#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF) +#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF) +#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF) +#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF) +#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7) +#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1) +#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1) +#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1) +#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1) + +struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *, + struct qlcnic_host_rds_ring *, u16, u16); + +inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + writel(0, tx_ring->crb_intr_mask); +} + +inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + writel(1, tx_ring->crb_intr_mask); +} + +static inline u8 qlcnic_mac_hash(u64 mac) +{ + return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff)); +} + +static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, + u16 handle, u8 ring_id) +{ + if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) + return handle | (ring_id << 15); + else + return handle; +} + +static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data) +{ + return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 
1 : 0; +} + +void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, + int loopback_pkt, __le16 vlan_id) +{ + struct ethhdr *phdr = (struct ethhdr *)(skb->data); + struct qlcnic_filter *fil, *tmp_fil; + struct hlist_node *tmp_hnode, *n; + struct hlist_head *head; + unsigned long time; + u64 src_addr = 0; + u8 hindex, found = 0, op; + int ret; + + memcpy(&src_addr, phdr->h_source, ETH_ALEN); + + if (loopback_pkt) { + if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax) + return; + + hindex = qlcnic_mac_hash(src_addr) & + (adapter->fhash.fbucket_size - 1); + head = &(adapter->rx_fhash.fhead[hindex]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && + tmp_fil->vlan_id == vlan_id) { + time = tmp_fil->ftime; + if (jiffies > (QLCNIC_READD_AGE * HZ + time)) + tmp_fil->ftime = jiffies; + return; + } + } + + fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); + if (!fil) + return; + + fil->ftime = jiffies; + memcpy(fil->faddr, &src_addr, ETH_ALEN); + fil->vlan_id = vlan_id; + spin_lock(&adapter->rx_mac_learn_lock); + hlist_add_head(&(fil->fnode), head); + adapter->rx_fhash.fnum++; + spin_unlock(&adapter->rx_mac_learn_lock); + } else { + hindex = qlcnic_mac_hash(src_addr) & + (adapter->fhash.fbucket_size - 1); + head = &(adapter->rx_fhash.fhead[hindex]); + spin_lock(&adapter->rx_mac_learn_lock); + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && + tmp_fil->vlan_id == vlan_id) { + found = 1; + break; + } + } + + if (!found) { + spin_unlock(&adapter->rx_mac_learn_lock); + return; + } + + op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; + ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr, + vlan_id, op); + if (!ret) { + op = vlan_id ? 
QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; + ret = qlcnic_sre_macaddr_change(adapter, + (u8 *)&src_addr, + vlan_id, op); + if (!ret) { + hlist_del(&(tmp_fil->fnode)); + adapter->rx_fhash.fnum--; + } + } + spin_unlock(&adapter->rx_mac_learn_lock); + } +} + +void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, + __le16 vlan_id) { struct cmd_desc_type0 *hwdesc; struct qlcnic_nic_req *req; struct qlcnic_mac_req *mac_req; struct qlcnic_vlan_req *vlan_req; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; u32 producer; u64 word; @@ -128,14 +268,14 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter, } static void qlcnic_send_filter(struct qlcnic_adapter *adapter, - struct qlcnic_host_tx_ring *tx_ring, struct cmd_desc_type0 *first_desc, struct sk_buff *skb) { - struct ethhdr *phdr = (struct ethhdr *)(skb->data); struct qlcnic_filter *fil, *tmp_fil; struct hlist_node *tmp_hnode, *n; struct hlist_head *head; + struct net_device *netdev = adapter->netdev; + struct ethhdr *phdr = (struct ethhdr *)(skb->data); u64 src_addr = 0; __le16 vlan_id = 0; u8 hindex; @@ -143,23 +283,23 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) return; - if (adapter->fhash.fnum >= adapter->fhash.fmax) + if (adapter->fhash.fnum >= adapter->fhash.fmax) { + adapter->stats.mac_filter_limit_overrun++; + netdev_info(netdev, "Can not add more than %d mac addresses\n", + adapter->fhash.fmax); return; + } - /* Only NPAR capable devices support vlan based learning*/ - if (adapter->flags & QLCNIC_ESWITCH_ENABLED) - vlan_id = first_desc->vlan_TCI; memcpy(&src_addr, phdr->h_source, ETH_ALEN); - hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1); + hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1); head = &(adapter->fhash.fhead[hindex]); hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && - tmp_fil->vlan_id == vlan_id) { - + tmp_fil->vlan_id == vlan_id) { if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) - qlcnic_change_filter(adapter, src_addr, vlan_id, - tx_ring); + qlcnic_change_filter(adapter, &src_addr, + vlan_id); tmp_fil->ftime = jiffies; return; } @@ -169,17 +309,13 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, if (!fil) return; - qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring); - + qlcnic_change_filter(adapter, &src_addr, vlan_id); fil->ftime = jiffies; fil->vlan_id = vlan_id; memcpy(fil->faddr, &src_addr, ETH_ALEN); - spin_lock(&adapter->mac_learn_lock); - hlist_add_head(&(fil->fnode), head); adapter->fhash.fnum++; - spin_unlock(&adapter->mac_learn_lock); } @@ -474,8 +610,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) goto unwind_buff; - if (adapter->mac_learn) - qlcnic_send_filter(adapter, tx_ring, first_desc, skb); + if (adapter->drv_mac_learn) + qlcnic_send_filter(adapter, first_desc, skb); adapter->stats.txbytes += skb->len; adapter->stats.xmitcalled++; @@ -528,8 +664,8 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, } skb_reserve(skb, NET_IP_ALIGN); - dma = pci_map_single(pdev, skb->data, rds_ring->dma_size, - PCI_DMA_FROMDEVICE); + dma = pci_map_single(pdev, skb->data, + rds_ring->dma_size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(pdev, dma)) { adapter->stats.rx_dma_map_error++; @@ -544,12 +680,13 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, 
} static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring) + struct qlcnic_host_rds_ring *rds_ring, + u8 ring_id) { struct rcv_desc *pdesc; struct qlcnic_rx_buffer *buffer; int count = 0; - uint32_t producer; + uint32_t producer, handle; struct list_head *head; if (!spin_trylock(&rds_ring->lock)) @@ -557,7 +694,6 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, producer = rds_ring->producer; head = &rds_ring->free_list; - while (!list_empty(head)) { buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); @@ -565,28 +701,29 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) break; } - count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + handle = qlcnic_get_ref_handle(adapter, + buffer->ref_handle, ring_id); + pdesc->reference_handle = cpu_to_le16(handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); pdesc->addr_buffer = cpu_to_le64(buffer->dma); producer = get_next_index(producer, rds_ring->num_desc); } - if (count) { rds_ring->producer = producer; writel((producer - 1) & (rds_ring->num_desc - 1), rds_ring->crb_rcv_producer); } - spin_unlock(&rds_ring->lock); } -static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) +static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring, + int budget) { u32 sw_consumer, hw_consumer; int i, done, count = 0; @@ -594,7 +731,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; struct qlcnic_skb_frag *frag; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; if (!spin_trylock(&adapter->tx_clean_lock)) return 1; @@ -615,22 +751,19 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) PCI_DMA_TODEVICE); frag->dma = 0ULL; } - adapter->stats.xmitfinished++; dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); - if (++count >= MAX_STATUS_HANDLE) + if (++count >= budget) break; } if (count && netif_running(netdev)) { tx_ring->sw_consumer = sw_consumer; - smp_mb(); - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { netif_wake_queue(netdev); @@ -654,7 +787,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); return done; @@ -662,16 +794,15 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) static int qlcnic_poll(struct napi_struct *napi, int budget) { + int tx_complete, work_done; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter; - int tx_complete, work_done; sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); adapter = sds_ring->adapter; - - tx_complete = qlcnic_process_cmd_ring(adapter); + tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring, + budget); work_done = qlcnic_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { napi_complete(&sds_ring->napi); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) @@ -804,26 +935,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index, } } -static struct sk_buff * 
-qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring, u16 index, - u16 cksum) +struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *ring, + u16 index, u16 cksum) { struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; - buffer = &rds_ring->rx_buf_arr[index]; - + buffer = &ring->rx_buf_arr[index]; if (unlikely(buffer->skb == NULL)) { WARN_ON(1); return NULL; } - pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, + pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size, PCI_DMA_FROMDEVICE); skb = buffer->skb; - if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { adapter->stats.csummed++; @@ -832,6 +960,7 @@ qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, skb_checksum_none_assert(skb); } + buffer->skb = NULL; return skb; @@ -871,8 +1000,8 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; - int index, length, cksum, pkt_offset; - u16 vid = 0xffff; + int index, length, cksum, pkt_offset, is_lb_pkt; + u16 vid = 0xffff, t_vid; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; @@ -892,6 +1021,14 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, if (!skb) return buffer; + if (adapter->drv_mac_learn && + (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + t_vid = 0; + is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, + cpu_to_le16(t_vid)); + } + if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else @@ -933,10 +1070,11 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; struct iphdr *iph; + struct ipv6hdr *ipv6h; struct tcphdr *th; bool push, timestamp; - int index, l2_hdr_offset, l4_hdr_offset; - u16 lro_length, length, data_offset, vid = 0xffff; + int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt; + u16 lro_length, length, data_offset, t_vid, vid = 0xffff; u32 seq_number; if (unlikely(ring > adapter->max_rds_rings)) @@ -961,6 +1099,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, if (!skb) return buffer; + if (adapter->drv_mac_learn && + (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + t_vid = 0; + is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, + cpu_to_le16(t_vid)); + } + if (timestamp) data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; else @@ -976,12 +1122,21 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, } skb->protocol = eth_type_trans(skb, netdev); - iph = (struct iphdr *)skb->data; - th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); - length = (iph->ihl << 2) + (th->doff << 2) + lro_length; - iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + + if (ntohs(skb->protocol) == ETH_P_IPV6) { + ipv6h = (struct ipv6hdr *)skb->data; + th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr)); + length = (th->doff << 2) + lro_length; + ipv6h->payload_len = htons(length); + } else { + iph = (struct iphdr *)skb->data; + th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); + length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + iph->tot_len = htons(length); + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + } + th->psh = push; th->seq = htonl(seq_number); length = skb->len; @@ -1011,9 +1166,9 @@ int qlcnic_process_rcv_ring(struct 
qlcnic_host_sds_ring *sds_ring, int max) struct list_head *cur; struct status_desc *desc; struct qlcnic_rx_buffer *rxbuf; + int opcode, desc_cnt, count = 0; u64 sts_data0, sts_data1; - __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM); - int opcode, ring, desc_cnt, count = 0; + u8 ring; u32 consumer = sds_ring->consumer; while (count < max) { @@ -1025,7 +1180,6 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); opcode = qlcnic_get_sts_opcode(sts_data0); - switch (opcode) { case QLCNIC_RXPKT_DESC: case QLCNIC_OLD_RXPKT_DESC: @@ -1045,18 +1199,16 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) default: goto skip; } - WARN_ON(desc_cnt > 1); if (likely(rxbuf)) list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); else adapter->stats.null_rxbuf++; - skip: for (; desc_cnt > 0; desc_cnt--) { desc = &sds_ring->desc_head[consumer]; - desc->status_desc_data[0] = owner_phantom; + desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW; consumer = get_next_index(consumer, sds_ring->num_desc); } count++; @@ -1064,7 +1216,6 @@ skip: for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; - if (!list_empty(&sds_ring->free_list[ring])) { list_for_each(cur, &sds_ring->free_list[ring]) { rxbuf = list_entry(cur, struct qlcnic_rx_buffer, @@ -1077,7 +1228,7 @@ skip: spin_unlock(&rds_ring->lock); } - qlcnic_post_rx_buffers_nodb(adapter, rds_ring); + qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring); } if (count) { @@ -1089,12 +1240,12 @@ skip: } void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring) + struct qlcnic_host_rds_ring *rds_ring, u8 ring_id) { struct rcv_desc *pdesc; struct qlcnic_rx_buffer *buffer; int count = 0; - u32 producer; + u32 producer, handle; struct list_head *head; producer = rds_ring->producer; @@ -1115,7 +1266,9 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->addr_buffer = cpu_to_le64(buffer->dma); - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle, + ring_id); + pdesc->reference_handle = cpu_to_le16(handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); producer = get_next_index(producer, rds_ring->num_desc); } @@ -1185,7 +1338,7 @@ static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring, return; } -void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) { struct qlcnic_adapter *adapter = sds_ring->adapter; struct status_desc *desc; @@ -1222,26 +1375,8 @@ void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) writel(consumer, sds_ring->crb_sts_consumer); } -void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac) -{ - u32 mac_low, mac_high; - int i; - - mac_low = off1; - mac_high = off2; - - if (alt_mac) { - mac_low |= (mac_low >> 16) | (mac_high << 16); - mac_high >>= 16; - } - - for (i = 0; i < 2; i++) - mac[i] = (u8)(mac_high >> ((1 - i) * 8)); - for (i = 2; i < 6; i++) - mac[i] = (u8)(mac_low >> ((5 - i) * 8)); -} - -int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) +int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev) { int ring, max_sds_rings; struct qlcnic_host_sds_ring *sds_ring; @@ -1254,8 +1389,7 @@ int 
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; - - if (ring == max_sds_rings - 1) + if (ring == adapter->max_sds_rings - 1) netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, QLCNIC_NETDEV_WEIGHT / max_sds_rings); else @@ -1263,10 +1397,15 @@ int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) QLCNIC_NETDEV_WEIGHT*2); } + if (qlcnic_alloc_tx_rings(adapter, netdev)) { + qlcnic_free_sds_rings(recv_ctx); + return -ENOMEM; + } + return 0; } -void qlcnic_napi_del(struct qlcnic_adapter *adapter) +void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; @@ -1278,9 +1417,10 @@ void qlcnic_napi_del(struct qlcnic_adapter *adapter) } qlcnic_free_sds_rings(adapter->recv_ctx); + qlcnic_free_tx_rings(adapter); } -void qlcnic_napi_enable(struct qlcnic_adapter *adapter) +void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; @@ -1296,7 +1436,7 @@ void qlcnic_napi_enable(struct qlcnic_adapter *adapter) } } -void qlcnic_napi_disable(struct qlcnic_adapter *adapter) +void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; @@ -1312,3 +1452,481 @@ void qlcnic_napi_disable(struct qlcnic_adapter *adapter) napi_disable(&sds_ring->napi); } } + +#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36) +#define QLC_83XX_LRO_LB_PKT (1ULL << 46) + +static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt) +{ + if (lro_pkt) + return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0; + else + return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0; +} + +static struct qlcnic_rx_buffer * +qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + u8 ring, u64 sts_data[]) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, is_lb_pkt; + u16 vid = 0xffff, t_vid; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_83xx_hndl(sts_data[0]); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + length = qlcnic_83xx_pktln(sts_data[0]); + cksum = qlcnic_83xx_csum_status(sts_data[1]); + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (adapter->drv_mac_learn && + (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + t_vid = 0; + is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, + cpu_to_le16(t_vid)); + } + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, vid); + + napi_gro_receive(&sds_ring->napi, skb); + + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; + + return buffer; +} + +static struct qlcnic_rx_buffer * +qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, + u8 ring, u64 sts_data[]) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + 
struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + struct iphdr *iph; + struct ipv6hdr *ipv6h; + struct tcphdr *th; + bool push; + int l2_hdr_offset, l4_hdr_offset; + int index, is_lb_pkt; + u16 lro_length, length, data_offset, gso_size; + u16 vid = 0xffff, t_vid; + + if (unlikely(ring > adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_83xx_hndl(sts_data[0]); + if (unlikely(index > rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + lro_length = qlcnic_83xx_lro_pktln(sts_data[0]); + l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]); + l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]); + push = qlcnic_83xx_is_psh_bit(sts_data[1]); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return buffer; + + if (adapter->drv_mac_learn && + (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + t_vid = 0; + is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, + cpu_to_le16(t_vid)); + } + if (qlcnic_83xx_is_tstamp(sts_data[1])) + data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE; + else + data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE; + + skb_put(skb, lro_length + data_offset); + skb_pull(skb, l2_hdr_offset); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + if (ntohs(skb->protocol) == ETH_P_IPV6) { + ipv6h = (struct ipv6hdr *)skb->data; + th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr)); + + length = (th->doff << 2) + lro_length; + ipv6h->payload_len = htons(length); + } else { + iph = (struct iphdr *)skb->data; + th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); + length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + iph->tot_len = htons(length); + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + } + + th->psh = push; + length = skb->len; + + if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) { + gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]); + skb_shinfo(skb)->gso_size = gso_size; + if (skb->protocol == htons(ETH_P_IPV6)) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + } + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, vid); + + netif_receive_skb(skb); + + adapter->stats.lro_pkts++; + adapter->stats.lrobytes += length; + return buffer; +} + +static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, + int max) +{ + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct list_head *cur; + struct status_desc *desc; + struct qlcnic_rx_buffer *rxbuf = NULL; + u8 ring; + u64 sts_data[2]; + int count = 0, opcode; + u32 consumer = sds_ring->consumer; + + while (count < max) { + desc = &sds_ring->desc_head[consumer]; + sts_data[1] = le64_to_cpu(desc->status_desc_data[1]); + opcode = qlcnic_83xx_opcode(sts_data[1]); + if (!opcode) + break; + sts_data[0] = le64_to_cpu(desc->status_desc_data[0]); + ring = QLCNIC_FETCH_RING_ID(sts_data[0]); + + switch (opcode) { + case QLC_83XX_REG_DESC: + rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring, + ring, sts_data); + break; + case QLC_83XX_LRO_DESC: + rxbuf = qlcnic_83xx_process_lro(adapter, ring, + sts_data); + break; + default: + dev_info(&adapter->pdev->dev, + "Unkonwn opcode: 0x%x\n", opcode); + goto skip; + } + + if (likely(rxbuf)) + 
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + else + adapter->stats.null_rxbuf++; +skip: + desc = &sds_ring->desc_head[consumer]; + /* Reset the descriptor */ + desc->status_desc_data[1] = 0; + consumer = get_next_index(consumer, sds_ring->num_desc); + count++; + } + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, struct qlcnic_rx_buffer, + list); + qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + list_splice_tail_init(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring); + } + if (count) { + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); + } + return count; +} + +static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) +{ + int tx_complete; + int work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + struct qlcnic_host_tx_ring *tx_ring; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + /* tx ring count = 1 */ + tx_ring = adapter->tx_ring; + + tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + if ((work_done < budget) && tx_complete) { + napi_complete(&sds_ring->napi); + qlcnic_83xx_enable_intr(adapter, sds_ring); + } + + return work_done; +} + +static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) +{ + int work_done; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_adapter *adapter; + + budget = QLCNIC_TX_POLL_BUDGET; + tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); + adapter = tx_ring->adapter; + work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + if (work_done) { + napi_complete(&tx_ring->napi); + if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) + qlcnic_83xx_enable_tx_intr(adapter, tx_ring); + } + + return work_done; +} + +static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget) +{ + int work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_83xx_enable_intr(adapter, sds_ring); + } + + return work_done; +} + +void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + if (adapter->flags & QLCNIC_MSIX_ENABLED) + qlcnic_83xx_enable_intr(adapter, sds_ring); + } + + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + napi_enable(&tx_ring->napi); + qlcnic_83xx_enable_tx_intr(adapter, tx_ring); + } + } +} + +void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = 
adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + qlcnic_83xx_disable_intr(adapter, sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } + + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + qlcnic_83xx_disable_tx_intr(adapter, tx_ring); + napi_synchronize(&tx_ring->napi); + napi_disable(&tx_ring->napi); + } + } +} + +int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + int ring, max_sds_rings; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + return -ENOMEM; + + max_sds_rings = adapter->max_sds_rings; + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_rx_poll, + QLCNIC_NETDEV_WEIGHT * 2); + else + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_poll, + QLCNIC_NETDEV_WEIGHT / max_sds_rings); + } + + if (qlcnic_alloc_tx_rings(adapter, netdev)) { + qlcnic_free_sds_rings(recv_ctx); + return -ENOMEM; + } + + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_add(netdev, &tx_ring->napi, + qlcnic_83xx_msix_tx_poll, + QLCNIC_NETDEV_WEIGHT); + } + } + + return 0; +} + +void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + qlcnic_free_sds_rings(adapter->recv_ctx); + + if ((adapter->flags & QLCNIC_MSIX_ENABLED)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_del(&tx_ring->napi); + } + } + + qlcnic_free_tx_rings(adapter); +} + +void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter, + int ring, u64 sts_data[]) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length; + + if (unlikely(ring >= adapter->max_rds_rings)) + return; + + rds_ring = &recv_ctx->rds_rings[ring]; + index = qlcnic_83xx_hndl(sts_data[0]); + if (unlikely(index >= rds_ring->num_desc)) + return; + + length = qlcnic_83xx_pktln(sts_data[0]); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) + adapter->ahw->diag_cnt++; + else + dump_skb(skb, adapter); + + dev_kfree_skb_any(skb); + return; +} + +void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct status_desc *desc; + u64 sts_data[2]; + int ring, opcode; + u32 consumer = sds_ring->consumer; + + desc = &sds_ring->desc_head[consumer]; + 
sts_data[0] = le64_to_cpu(desc->status_desc_data[0]); + sts_data[1] = le64_to_cpu(desc->status_desc_data[1]); + opcode = qlcnic_83xx_opcode(sts_data[1]); + if (!opcode) + return; + + ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0])); + qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data); + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index d833f5927891..5d5fd06c4b42 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1,24 +1,25 @@ /* * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation + * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ -#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include "qlcnic.h" +#include "qlcnic_hw.h" #include <linux/swab.h> #include <linux/dma-mapping.h> +#include <linux/if_vlan.h> #include <net/ip.h> #include <linux/ipv6.h> #include <linux/inetdevice.h> -#include <linux/sysfs.h> #include <linux/aer.h> #include <linux/log2.h> +#include <linux/pci.h> MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); MODULE_LICENSE("GPL"); @@ -29,28 +30,28 @@ char qlcnic_driver_name[] = "qlcnic"; static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID; -static struct workqueue_struct *qlcnic_wq; static int qlcnic_mac_learn; module_param(qlcnic_mac_learn, int, 0444); -MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); +MODULE_PARM_DESC(qlcnic_mac_learn, + "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); -static int qlcnic_use_msi = 1; +int qlcnic_use_msi = 1; MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); module_param_named(use_msi, qlcnic_use_msi, int, 0444); -static int qlcnic_use_msi_x = 1; +int qlcnic_use_msi_x = 1; MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); -static int qlcnic_auto_fw_reset = 1; +int qlcnic_auto_fw_reset = 1; MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); -static int qlcnic_load_fw_file; +int qlcnic_load_fw_file; MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); -static int qlcnic_config_npars; +int qlcnic_config_npars; module_param(qlcnic_config_npars, int, 0444); MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); @@ -62,9 +63,6 @@ static void qlcnic_tx_timeout(struct net_device *netdev); static void qlcnic_attach_work(struct work_struct *work); static void qlcnic_fwinit_work(struct work_struct *work); static void qlcnic_fw_poll_work(struct work_struct *work); -static void qlcnic_schedule_work(struct qlcnic_adapter *adapter, - work_func_t func, int delay); -static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); #ifdef CONFIG_NET_POLL_CONTROLLER static void qlcnic_poll_controller(struct net_device *netdev); #endif @@ -77,9 +75,9 @@ static irqreturn_t 
qlcnic_tmp_intr(int irq, void *data); static irqreturn_t qlcnic_intr(int irq, void *data); static irqreturn_t qlcnic_msi_intr(int irq, void *data); static irqreturn_t qlcnic_msix_intr(int irq, void *data); +static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data); static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); -static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long); static int qlcnic_start_firmware(struct qlcnic_adapter *); static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); @@ -93,15 +91,24 @@ static int qlcnic_vlan_rx_del(struct net_device *, u16); #define QLCNIC_IS_TSO_CAPABLE(adapter) \ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) +static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X) + return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX; + else + return 1; +} + /* PCI Device ID Table */ #define ENTRY(device) \ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} -#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 - static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), {0,} }; @@ -120,6 +127,32 @@ static const u32 msi_tgt_status[8] = { ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 }; +static const u32 qlcnic_reg_tbl[] = { + 0x1B20A8, /* PEG_HALT_STAT1 */ + 0x1B20AC, /* PEG_HALT_STAT2 */ + 0x1B20B0, /* FW_HEARTBEAT */ + 0x1B2100, /* LOCK ID */ + 0x1B2128, /* FW_CAPABILITIES */ + 0x1B2138, /* drv active */ + 0x1B2140, /* dev state */ + 0x1B2144, /* drv state */ + 0x1B2148, /* drv scratch */ + 0x1B214C, /* dev partition info */ + 0x1B2174, /* drv idc ver */ + 0x1B2150, /* fw version major */ + 0x1B2154, /* fw version minor */ + 0x1B2158, /* fw version sub */ + 0x1B219C, /* npar state */ + 0x1B21FC, /* FW_IMG_VALID */ + 0x1B2250, /* CMD_PEG_STATE */ + 0x1B233C, /* RCV_PEG_STATE */ + 0x1B23B4, /* ASIC TEMP */ + 0x1B216C, /* FW api */ + 0x1B2170, /* drv op mode */ + 0x13C010, /* flash lock */ + 0x13C014, /* flash unlock */ +}; + static const struct qlcnic_board_info qlcnic_boards[] = { {0x1077, 0x8020, 0x1077, 0x203, "8200 Series Single Port 10GbE Converged Network Adapter" @@ -143,6 +176,7 @@ static const struct qlcnic_board_info qlcnic_boards[] = { }; #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) +#define QLC_MAX_SDS_RINGS 8 static const struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; @@ -164,35 +198,6 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) recv_ctx->sds_rings = NULL; } -static void qlcnic_clear_stats(struct qlcnic_adapter *adapter) -{ - memset(&adapter->stats, 0, sizeof(adapter->stats)); -} - -static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable) -{ - u32 control; - int pos; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_dword(pdev, pos, &control); - if (enable) - control |= PCI_MSIX_FLAGS_ENABLE; - else - control = 0; - pci_write_config_dword(pdev, pos, control); - } -} - -static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count) -{ - int i; - - for (i = 0; i < count; i++) - adapter->msix_entries[i].entry = i; -} - static int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) { @@ -204,12 +209,11 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) return -EIO; memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); - 
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); /* set station address */ - if (!is_valid_ether_addr(netdev->perm_addr)) + if (!is_valid_ether_addr(netdev->dev_addr)) dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); @@ -225,7 +229,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) return -EOPNOTSUPP; if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; + return -EINVAL; if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_detach(netdev); @@ -243,6 +247,85 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) return 0; } +static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, const unsigned char *addr) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = -EOPNOTSUPP; + + if (!adapter->fdb_mac_learn) { + pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", + __func__); + return err; + } + + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { + if (is_unicast_ether_addr(addr)) + err = qlcnic_nic_del_mac(adapter, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(netdev, addr); + else + err = -EINVAL; + } + return err; +} + +static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 flags) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (!adapter->fdb_mac_learn) { + pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", + __func__); + return -EOPNOTSUPP; + } + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + pr_info("%s: FDB e-switch is not enabled\n", __func__); + return -EOPNOTSUPP; + } + + if (ether_addr_equal(addr, adapter->mac_addr)) + return err; + + if (is_unicast_ether_addr(addr)) + err = qlcnic_nic_add_mac(adapter, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(netdev, addr); + else + err = -EINVAL; + + return err; +} + +static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb, + struct net_device *netdev, int idx) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!adapter->fdb_mac_learn) { + pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", + __func__); + return -EOPNOTSUPP; + } + + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx); + + return idx; +} + +static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) +{ + while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + usleep_range(10000, 11000); + + cancel_delayed_work_sync(&adapter->fw_work); +} + static const struct net_device_ops qlcnic_netdev_ops = { .ndo_open = qlcnic_open, .ndo_stop = qlcnic_close, @@ -257,6 +340,9 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_tx_timeout = qlcnic_tx_timeout, .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add, .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del, + .ndo_fdb_add = qlcnic_fdb_add, + .ndo_fdb_del = qlcnic_fdb_del, + .ndo_fdb_dump = qlcnic_fdb_dump, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, #endif @@ -267,50 +353,125 @@ static const struct net_device_ops qlcnic_netdev_failed_ops = { }; static struct qlcnic_nic_template qlcnic_ops = { - .config_bridged_mode = qlcnic_config_bridged_mode, - .config_led = qlcnic_config_led, - .start_firmware = qlcnic_start_firmware + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = 
qlcnic_82xx_config_led, + .start_firmware = qlcnic_82xx_start_firmware, + .request_reset = qlcnic_82xx_dev_request_reset, + .cancel_idc_work = qlcnic_82xx_cancel_idc_work, + .napi_add = qlcnic_82xx_napi_add, + .napi_del = qlcnic_82xx_napi_del, + .config_ipaddr = qlcnic_82xx_config_ipaddr, + .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr, }; -static struct qlcnic_nic_template qlcnic_vf_ops = { - .config_bridged_mode = qlcnicvf_config_bridged_mode, - .config_led = qlcnicvf_config_led, - .start_firmware = qlcnicvf_start_firmware +struct qlcnic_nic_template qlcnic_vf_ops = { + .config_bridged_mode = qlcnicvf_config_bridged_mode, + .config_led = qlcnicvf_config_led, + .start_firmware = qlcnicvf_start_firmware }; -static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) +static struct qlcnic_hardware_ops qlcnic_hw_ops = { + .read_crb = qlcnic_82xx_read_crb, + .write_crb = qlcnic_82xx_write_crb, + .read_reg = qlcnic_82xx_hw_read_wx_2M, + .write_reg = qlcnic_82xx_hw_write_wx_2M, + .get_mac_address = qlcnic_82xx_get_mac_address, + .setup_intr = qlcnic_82xx_setup_intr, + .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args, + .mbx_cmd = qlcnic_82xx_issue_cmd, + .get_func_no = qlcnic_82xx_get_func_no, + .api_lock = qlcnic_82xx_api_lock, + .api_unlock = qlcnic_82xx_api_unlock, + .add_sysfs = qlcnic_82xx_add_sysfs, + .remove_sysfs = qlcnic_82xx_remove_sysfs, + .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag, + .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx, + .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx, + .setup_link_event = qlcnic_82xx_linkevent_request, + .get_nic_info = qlcnic_82xx_get_nic_info, + .get_pci_info = qlcnic_82xx_get_pci_info, + .set_nic_info = qlcnic_82xx_set_nic_info, + .change_macvlan = qlcnic_82xx_sre_macaddr_change, + .napi_enable = qlcnic_82xx_napi_enable, + .napi_disable = qlcnic_82xx_napi_disable, + .config_intr_coal = qlcnic_82xx_config_intr_coalesce, + .config_rss = qlcnic_82xx_config_rss, + .config_hw_lro = qlcnic_82xx_config_hw_lro, + .config_loopback = qlcnic_82xx_set_lb_mode, + .clear_loopback = qlcnic_82xx_clear_lb_mode, + .config_promisc_mode = qlcnic_82xx_nic_set_promisc, + .change_l2_filter = qlcnic_82xx_change_filter, + .get_board_info = qlcnic_82xx_get_board_info, +}; + +int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) { struct pci_dev *pdev = adapter->pdev; - int err = -1; + int err = -1, i; + int max_tx_rings; + + if (!adapter->msix_entries) { + adapter->msix_entries = kcalloc(num_msix, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + } adapter->max_sds_rings = 1; adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); - qlcnic_set_msix_bit(pdev, 0); if (adapter->ahw->msix_supported) { enable_msix: - qlcnic_init_msix_entries(adapter, num_msix); + for (i = 0; i < num_msix; i++) + adapter->msix_entries[i].entry = i; err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); if (err == 0) { adapter->flags |= QLCNIC_MSIX_ENABLED; - qlcnic_set_msix_bit(pdev, 1); - - adapter->max_sds_rings = num_msix; - + if (qlcnic_83xx_check(adapter)) { + adapter->ahw->num_msix = num_msix; + /* subtract mail box and tx ring vectors */ + max_tx_rings = adapter->max_drv_tx_rings; + adapter->max_sds_rings = num_msix - + max_tx_rings - 1; + } else { + adapter->max_sds_rings = num_msix; + } dev_info(&pdev->dev, "using msi-x interrupts\n"); return err; - } - if (err > 0) { - num_msix = rounddown_pow_of_two(err); - if (num_msix) + } else if (err > 0) { + dev_info(&pdev->dev, + 
"Unable to allocate %d MSI-X interrupt vectors\n", + num_msix); + if (qlcnic_83xx_check(adapter)) { + if (err < QLC_83XX_MINIMUM_VECTOR) + return err; + err -= (adapter->max_drv_tx_rings + 1); + num_msix = rounddown_pow_of_two(err); + num_msix += (adapter->max_drv_tx_rings + 1); + } else { + num_msix = rounddown_pow_of_two(err); + } + + if (num_msix) { + dev_info(&pdev->dev, + "Trying to allocate %d MSI-X interrupt vectors\n", + num_msix); goto enable_msix; + } + } else { + dev_info(&pdev->dev, + "Unable to allocate %d MSI-X interrupt vectors\n", + num_msix); } } + return err; } -static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) +static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) { + int err = 0; u32 offset, mask_reg; const struct qlcnic_legacy_intr_set *legacy_intrp; struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -323,8 +484,10 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) offset); dev_info(&pdev->dev, "using msi interrupts\n"); adapter->msix_entries[0].vector = pdev->irq; - return; + return err; } + if (qlcnic_use_msi || qlcnic_use_msi_x) + return -EOPNOTSUPP; legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit; @@ -336,32 +499,47 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG); dev_info(&pdev->dev, "using legacy interrupts\n"); adapter->msix_entries[0].vector = pdev->irq; + return err; } -static void -qlcnic_setup_intr(struct qlcnic_adapter *adapter) +int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) { - int num_msix; + int num_msix, err = 0; - if (adapter->ahw->msix_supported) { + if (!num_intr) + num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS; + + if (adapter->ahw->msix_supported) num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), - QLCNIC_DEF_NUM_STS_DESC_RINGS)); - } else + num_intr)); + else num_msix = 1; - if (!qlcnic_enable_msix(adapter, num_msix)) - return; + err = qlcnic_enable_msix(adapter, num_msix); + if (err == -ENOMEM || !err) + return err; - qlcnic_enable_msi_legacy(adapter); + err = qlcnic_enable_msi_legacy(adapter); + if (!err) + return err; + + return -EIO; } -static void -qlcnic_teardown_intr(struct qlcnic_adapter *adapter) +void qlcnic_teardown_intr(struct qlcnic_adapter *adapter) { if (adapter->flags & QLCNIC_MSIX_ENABLED) pci_disable_msix(adapter->pdev); if (adapter->flags & QLCNIC_MSI_ENABLED) pci_disable_msi(adapter->pdev); + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + if (adapter->ahw->intr_tbl) { + vfree(adapter->ahw->intr_tbl); + adapter->ahw->intr_tbl = NULL; + } } static void @@ -371,7 +549,36 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) iounmap(adapter->ahw->pci_base0); } -static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) +static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter) +{ + struct qlcnic_pci_info *pci_info; + int ret; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + switch (adapter->ahw->port_type) { + case QLCNIC_GBE: + adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS; + break; + case QLCNIC_XGBE: + adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS; + break; + } + return 0; + } + + if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) + return 0; + + pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + ret = qlcnic_get_pci_info(adapter, pci_info); + kfree(pci_info); + return ret; 
+} + +int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) { struct qlcnic_pci_info *pci_info; int i, ret = 0, j = 0; @@ -423,8 +630,11 @@ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) j++; } - for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) + for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) { adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; + if (qlcnic_83xx_check(adapter)) + qlcnic_enable_eswitch(adapter, i, 1); + } kfree(pci_info); return 0; @@ -462,40 +672,31 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter) QLC_DEV_SET_DRV(0xf, id)); } } else { - data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE); + data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) | (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, ahw->pci_func)); } - QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data); + QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data); qlcnic_api_unlock(adapter); err_lock: return ret; } -static void -qlcnic_check_vf(struct qlcnic_adapter *adapter) +static void qlcnic_check_vf(struct qlcnic_adapter *adapter, + const struct pci_device_id *ent) { - void __iomem *msix_base_addr; - void __iomem *priv_op; - u32 func; - u32 msix_base; u32 op_mode, priv_level; /* Determine FW API version */ - adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 + - QLCNIC_FW_API); + adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter, + QLCNIC_FW_API); /* Find PCI function number */ - pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); - msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; - msix_base = readl(msix_base_addr); - func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; - adapter->ahw->pci_func = func; + qlcnic_get_func_no(adapter); /* Determine function privilege level */ - priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; - op_mode = readl(priv_op); + op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); if (op_mode == QLC_DEV_DRV_DEFAULT) priv_level = QLCNIC_MGMT_FUNC; else @@ -512,12 +713,16 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter) } #define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL +#define QLCNIC_83XX_BAR0_LENGTH 0x4000 static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) { switch (dev_id) { case PCI_DEVICE_ID_QLOGIC_QLE824X: *bar = QLCNIC_82XX_BAR0_LENGTH; break; + case PCI_DEVICE_ID_QLOGIC_QLE834X: + *bar = QLCNIC_83XX_BAR0_LENGTH; + break; default: *bar = 0; } @@ -547,6 +752,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev, } dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); + ahw->pci_base0 = mem_ptr0; ahw->pci_len0 = pci_len0; offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func)); @@ -581,19 +787,26 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) static void qlcnic_check_options(struct qlcnic_adapter *adapter) { + int err; u32 fw_major, fw_minor, fw_build, prev_fw_version; struct pci_dev *pdev = adapter->pdev; - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; prev_fw_version = adapter->fw_version; - fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); - fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); - fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); + fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); adapter->fw_version = 
QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); - if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) { + err = qlcnic_get_board_info(adapter); + if (err) { + dev_err(&pdev->dev, "Error getting board config info.\n"); + return; + } + if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) { if (fw_dump->tmpl_hdr == NULL || adapter->fw_version > prev_fw_version) { if (fw_dump->tmpl_hdr) @@ -604,8 +817,9 @@ qlcnic_check_options(struct qlcnic_adapter *adapter) } } - dev_info(&pdev->dev, "firmware v%d.%d.%d\n", - fw_major, fw_minor, fw_build); + dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n", + QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build); + if (adapter->ahw->port_type == QLCNIC_XGBE) { if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; @@ -648,9 +862,19 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) adapter->ahw->max_tx_ques = nic_info.max_tx_ques; adapter->ahw->max_rx_ques = nic_info.max_rx_ques; adapter->ahw->capabilities = nic_info.capabilities; + + if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { + u32 temp; + temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); + adapter->ahw->capabilities2 = temp; + } adapter->ahw->max_mac_filters = nic_info.max_mac_filters; adapter->ahw->max_mtu = nic_info.max_mtu; + /* Disable NPAR for 83XX */ + if (qlcnic_83xx_check(adapter)) + return err; + if (adapter->ahw->capabilities & BIT_6) adapter->flags |= QLCNIC_ESWITCH_ENABLED; else @@ -709,7 +933,7 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, qlcnic_set_netdev_features(adapter, esw_cfg); } -static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) +int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) { struct qlcnic_esw_func_cfg esw_cfg; @@ -730,14 +954,17 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { struct net_device *netdev = adapter->netdev; - netdev_features_t features, vlan_features; + unsigned long features, vlan_features; + + if (qlcnic_83xx_check(adapter)) + return; features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_IPV6_CSUM | NETIF_F_GRO); + NETIF_F_IPV6_CSUM | NETIF_F_GRO); vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER); + NETIF_F_IPV6_CSUM); - if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) { + if (QLCNIC_IS_TSO_CAPABLE(adapter)) { features |= (NETIF_F_TSO | NETIF_F_TSO6); vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); } @@ -747,12 +974,19 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, if (esw_cfg->offload_flags & BIT_0) { netdev->features |= features; - if (!(esw_cfg->offload_flags & BIT_1)) + adapter->rx_csum = 1; + if (!(esw_cfg->offload_flags & BIT_1)) { netdev->features &= ~NETIF_F_TSO; - if (!(esw_cfg->offload_flags & BIT_2)) + features &= ~NETIF_F_TSO; + } + if (!(esw_cfg->offload_flags & BIT_2)) { netdev->features &= ~NETIF_F_TSO6; + features &= ~NETIF_F_TSO6; + } } else { netdev->features &= ~features; + features &= ~features; + adapter->rx_csum = 0; } netdev->vlan_features = (features & vlan_features); @@ -761,7 +995,6 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, static int qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) { - void __iomem *priv_op; u32 op_mode, priv_level; int err = 0; @@ -772,8 +1005,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) return 0; - priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; - 
op_mode = readl(priv_op); + op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); if (op_mode == QLC_DEV_DRV_DEFAULT) @@ -805,7 +1037,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) return err; } -static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) +int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) { struct qlcnic_esw_func_cfg esw_cfg; struct qlcnic_npar_info *npar; @@ -838,6 +1070,7 @@ static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) return 0; } + static int qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter, struct qlcnic_npar_info *npar, int pci_func) @@ -861,7 +1094,7 @@ qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter, return 0; } -static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) +int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) { int i, err; struct qlcnic_npar_info *npar; @@ -877,8 +1110,7 @@ static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) npar = &adapter->npars[i]; pci_func = npar->pci_func; memset(&nic_info, 0, sizeof(struct qlcnic_info)); - err = qlcnic_get_nic_info(adapter, - &nic_info, pci_func); + err = qlcnic_get_nic_info(adapter, &nic_info, pci_func); if (err) return err; nic_info.min_tx_bw = npar->min_bw; @@ -909,10 +1141,12 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter) if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) return 0; - npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + npar_state = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DEV_NPAR_STATE); while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) { msleep(1000); - npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + npar_state = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DEV_NPAR_STATE); } if (!npar_opt_timeo) { dev_err(&adapter->pdev->dev, @@ -944,8 +1178,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter) return err; } -static int -qlcnic_start_firmware(struct qlcnic_adapter *adapter) +int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter) { int err; @@ -985,9 +1218,8 @@ check_fw_status: if (err) goto err_out; - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); qlcnic_idc_debug_info(adapter, 1); - err = qlcnic_check_eswitch_mode(adapter); if (err) { dev_err(&adapter->pdev->dev, @@ -1005,7 +1237,7 @@ check_fw_status: return 0; err_out: - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); dev_err(&adapter->pdev->dev, "Device state set to failed\n"); qlcnic_release_firmware(adapter); @@ -1017,6 +1249,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) { irq_handler_t handler; struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; int err, ring; unsigned long flags = 0; @@ -1024,7 +1257,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { - handler = qlcnic_tmp_intr; + if (qlcnic_82xx_check(adapter)) + handler = qlcnic_tmp_intr; if (!QLCNIC_IS_MSI_FAMILY(adapter)) flags |= IRQF_SHARED; @@ -1035,20 +1269,44 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) handler = qlcnic_msi_intr; else { flags |= IRQF_SHARED; - handler = qlcnic_intr; + if (qlcnic_82xx_check(adapter)) + handler = qlcnic_intr; + else + handler = 
qlcnic_83xx_intr; } } adapter->irq = netdev->irq; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); - err = request_irq(sds_ring->irq, handler, - flags, sds_ring->name, sds_ring); - if (err) - return err; + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { + if (qlcnic_82xx_check(adapter) || + (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED))) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + snprintf(sds_ring->name, sizeof(int) + IFNAMSIZ, + "%s[%d]", netdev->name, ring); + err = request_irq(sds_ring->irq, handler, flags, + sds_ring->name, sds_ring); + if (err) + return err; + } + } + if (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED)) { + handler = qlcnic_msix_tx_intr; + for (ring = 0; ring < adapter->max_drv_tx_rings; + ring++) { + tx_ring = &adapter->tx_ring[ring]; + snprintf(tx_ring->name, sizeof(int) + IFNAMSIZ, + "%s[%d]", netdev->name, + adapter->max_sds_rings + ring); + err = request_irq(tx_ring->irq, handler, flags, + tx_ring->name, tx_ring); + if (err) + return err; + } + } } - return 0; } @@ -1057,21 +1315,48 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - free_irq(sds_ring->irq, sds_ring); + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { + if (qlcnic_82xx_check(adapter) || + (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED))) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + free_irq(sds_ring->irq, sds_ring); + } + } + if (qlcnic_83xx_check(adapter)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; + ring++) { + tx_ring = &adapter->tx_ring[ring]; + if (tx_ring->irq) + free_irq(tx_ring->irq, tx_ring); + } + } } } -static int -__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter) { - int ring; - u32 capab2; + u32 capab = 0; + + if (qlcnic_82xx_check(adapter)) { + if (adapter->ahw->capabilities2 & + QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) + adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; + } else { + capab = adapter->ahw->capabilities; + if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab)) + adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; + } +} +int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int ring; struct qlcnic_host_rds_ring *rds_ring; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) @@ -1081,19 +1366,14 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) return 0; if (qlcnic_set_eswitch_port_config(adapter)) return -EIO; - - if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { - capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); - if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) - adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; - } + qlcnic_get_lro_mss_capability(adapter); if (qlcnic_fw_create_ctx(adapter)) return -EIO; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; - qlcnic_post_rx_buffers(adapter, rds_ring); + qlcnic_post_rx_buffers(adapter, rds_ring, ring); } qlcnic_set_multi(netdev); @@ -1118,10 +1398,7 @@ __qlcnic_up(struct qlcnic_adapter 
*adapter, struct net_device *netdev) return 0; } -/* Usage: During resume and firmware recovery module.*/ - -static int -qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) { int err = 0; @@ -1133,8 +1410,7 @@ qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) return err; } -static void -__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) { if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; @@ -1166,8 +1442,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) /* Usage: During suspend and firmware recovery module */ -static void -qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) { rtnl_lock(); if (netif_running(netdev)) @@ -1176,7 +1451,7 @@ qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) } -static int +int qlcnic_attach(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1222,8 +1497,7 @@ err_out_napi_del: return err; } -static void -qlcnic_detach(struct qlcnic_adapter *adapter) +void qlcnic_detach(struct qlcnic_adapter *adapter) { if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; @@ -1272,21 +1546,9 @@ out: static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) { int err = 0; - adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context), - GFP_KERNEL); - if (!adapter->ahw) { - dev_err(&adapter->pdev->dev, - "Failed to allocate recv ctx resources for adapter\n"); - err = -ENOMEM; - goto err_out; - } adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), GFP_KERNEL); if (!adapter->recv_ctx) { - dev_err(&adapter->pdev->dev, - "Failed to allocate recv ctx resources for adapter\n"); - kfree(adapter->ahw); - adapter->ahw = NULL; err = -ENOMEM; goto err_out; } @@ -1294,6 +1556,8 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; + /* clear stats */ + memset(&adapter->stats, 0, sizeof(adapter->stats)); err_out: return err; } @@ -1307,8 +1571,9 @@ static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) vfree(adapter->ahw->fw_dump.tmpl_hdr); adapter->ahw->fw_dump.tmpl_hdr = NULL; } - kfree(adapter->ahw); - adapter->ahw = NULL; + + kfree(adapter->ahw->reset.buff); + adapter->ahw->fw_dump.tmpl_hdr = NULL; } int qlcnic_diag_alloc_res(struct net_device *netdev, int test) @@ -1328,6 +1593,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) adapter->max_sds_rings = 1; adapter->ahw->diag_test = test; + adapter->ahw->linkup = 0; ret = qlcnic_attach(adapter); if (ret) { @@ -1344,7 +1610,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; - qlcnic_post_rx_buffers(adapter, rds_ring); + qlcnic_post_rx_buffers(adapter, rds_ring, ring); } if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { @@ -1382,6 +1648,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) netif_device_attach(netdev); clear_bit(__QLCNIC_RESETTING, &adapter->state); + dev_err(&adapter->pdev->dev, "%s:\n", __func__); return 0; } @@ -1425,34 +1692,40 
@@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, int err; struct pci_dev *pdev = adapter->pdev; + adapter->rx_csum = 1; adapter->ahw->mc_enabled = 0; - adapter->ahw->max_mc_count = 38; + adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; netdev->netdev_ops = &qlcnic_netdev_ops; - netdev->watchdog_timeo = 5*HZ; + netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ; qlcnic_change_mtu(netdev, netdev->mtu); SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); - netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_IPV6_CSUM | NETIF_F_GRO | + NETIF_F_HW_VLAN_RX); + netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + + if (QLCNIC_IS_TSO_CAPABLE(adapter)) { + netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6); + netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); + } - if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) - netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - if (pci_using_dac == 1) - netdev->hw_features |= NETIF_F_HIGHDMA; + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } - netdev->vlan_features = netdev->hw_features; + if (qlcnic_vlan_tx_check(adapter)) + netdev->features |= (NETIF_F_HW_VLAN_TX); - if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX) - netdev->hw_features |= NETIF_F_HW_VLAN_TX; if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) - netdev->hw_features |= NETIF_F_LRO; - - netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + netdev->features |= NETIF_F_LRO; + netdev->hw_features = netdev->features; netdev->irq = adapter->msix_entries[0].vector; err = register_netdev(netdev); @@ -1480,17 +1753,61 @@ static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac) return 0; } -static int -qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count) +void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter) { - adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry), - GFP_KERNEL); + int ring; + struct qlcnic_host_tx_ring *tx_ring; - if (adapter->msix_entries) - return 0; + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + if (tx_ring && tx_ring->cmd_buf_arr != NULL) { + vfree(tx_ring->cmd_buf_arr); + tx_ring->cmd_buf_arr = NULL; + } + } + if (adapter->tx_ring != NULL) + kfree(adapter->tx_ring); +} + +int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + int ring, vector, index; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_cmd_buffer *cmd_buf_arr; + + tx_ring = kcalloc(adapter->max_drv_tx_rings, + sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL); + if (tx_ring == NULL) + return -ENOMEM; - dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n"); - return -ENOMEM; + adapter->tx_ring = tx_ring; + + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, ring); + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); + if (cmd_buf_arr == NULL) { + qlcnic_free_tx_rings(adapter); + return -ENOMEM; + } + memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); + tx_ring->cmd_buf_arr = cmd_buf_arr; + } + + if (qlcnic_83xx_check(adapter)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + 
tx_ring->adapter = adapter; + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + index = adapter->max_sds_rings + ring; + vector = adapter->msix_entries[index].vector; + tx_ring->irq = vector; + } + } + } + return 0; } static int @@ -1498,9 +1815,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev = NULL; struct qlcnic_adapter *adapter = NULL; + struct qlcnic_hardware_context *ahw; int err, pci_using_dac = -1; - uint8_t revision_id; - char board_name[QLCNIC_MAX_BOARD_NAME_LEN]; + u32 capab2; + char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ err = pci_enable_device(pdev); if (err) @@ -1522,10 +1840,27 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_enable_pcie_error_reporting(pdev); + ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL); + if (!ahw) + goto err_out_free_res; + + if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) { + ahw->hw_ops = &qlcnic_hw_ops; + ahw->reg_tbl = (u32 *)qlcnic_reg_tbl; + } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { + qlcnic_83xx_register_map(ahw); + } else { + goto err_out_free_hw_res; + } + + err = qlcnic_setup_pci_map(pdev, ahw); + if (err) + goto err_out_free_hw_res; + netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); if (!netdev) { err = -ENOMEM; - goto err_out_free_res; + goto err_out_iounmap; } SET_NETDEV_DEV(netdev, &pdev->dev); @@ -1533,15 +1868,25 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; + adapter->ahw = ahw; + + adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic"); + if (adapter->qlcnic_wq == NULL) { + dev_err(&pdev->dev, "Failed to create workqueue\n"); + goto err_out_free_netdev; + } err = qlcnic_alloc_adapter_resources(adapter); if (err) goto err_out_free_netdev; adapter->dev_rst_time = jiffies; - revision_id = pdev->revision; - adapter->ahw->revision_id = revision_id; - adapter->mac_learn = qlcnic_mac_learn; + adapter->ahw->revision_id = pdev->revision; + if (qlcnic_mac_learn == FDB_MAC_LEARN) + adapter->fdb_mac_learn = true; + else if (qlcnic_mac_learn == DRV_MAC_LEARN) + adapter->drv_mac_learn = true; + adapter->max_drv_tx_rings = 1; rwlock_init(&adapter->ahw->crb_lock); mutex_init(&adapter->ahw->mem_lock); @@ -1549,31 +1894,32 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); - err = qlcnic_setup_pci_map(pdev, adapter->ahw); - if (err) - goto err_out_free_hw; - qlcnic_check_vf(adapter); - - /* This will be reset for mezz cards */ - adapter->portnum = adapter->ahw->pci_func; - - err = qlcnic_get_board_info(adapter); - if (err) { - dev_err(&pdev->dev, "Error getting board config info.\n"); - goto err_out_iounmap; - } - - err = qlcnic_setup_idc_param(adapter); - if (err) - goto err_out_iounmap; + if (qlcnic_82xx_check(adapter)) { + qlcnic_check_vf(adapter, ent); + adapter->portnum = adapter->ahw->pci_func; + err = qlcnic_start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); + goto err_out_free_hw; + } - adapter->flags |= QLCNIC_NEED_FLR; + err = qlcnic_setup_idc_param(adapter); + if (err) + goto err_out_free_hw; - err = adapter->nic_ops->start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "Loading fw failed. 
Please Reboot\n" - "\t\tIf reboot doesn't help, try flashing the card\n"); - goto err_out_maintenance_mode; + adapter->flags |= QLCNIC_NEED_FLR; + } else if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_check_vf(adapter, ent); + adapter->portnum = adapter->ahw->pci_func; + err = qlcnic_83xx_init(adapter); + if (err) { + dev_err(&pdev->dev, "%s: failed\n", __func__); + goto err_out_free_hw; + } + } else { + dev_err(&pdev->dev, + "%s: failed. Please Reboot\n", __func__); + goto err_out_free_hw; } if (qlcnic_read_mac_addr(adapter)) @@ -1581,22 +1927,34 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (adapter->portnum == 0) { qlcnic_get_board_name(adapter, board_name); + pr_info("%s: %s Board Chip rev 0x%x\n", module_name(THIS_MODULE), board_name, adapter->ahw->revision_id); } + err = qlcnic_setup_intr(adapter, 0); + if (err) { + dev_err(&pdev->dev, "Failed to setup interrupt\n"); + goto err_out_disable_msi; + } - qlcnic_clear_stats(adapter); - - err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques); - if (err) - goto err_out_decr_ref; - - qlcnic_setup_intr(adapter); + if (qlcnic_83xx_check(adapter)) { + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) + goto err_out_disable_msi; + } err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); if (err) - goto err_out_disable_msi; + goto err_out_disable_mbx_intr; + + if (qlcnic_82xx_check(adapter)) { + if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { + capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); + if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB) + qlcnic_fw_cmd_set_drv_version(adapter); + } + } pci_set_drvdata(pdev, adapter); @@ -1615,29 +1973,37 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - if (adapter->mac_learn) + if (qlcnic_get_act_pci_func(adapter)) + goto err_out_disable_mbx_intr; + + if (adapter->drv_mac_learn) qlcnic_alloc_lb_filters_mem(adapter); - qlcnic_create_diag_entries(adapter); + qlcnic_add_sysfs(adapter); return 0; +err_out_disable_mbx_intr: + if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_free_mbx_intr(adapter); + err_out_disable_msi: qlcnic_teardown_intr(adapter); - kfree(adapter->msix_entries); - -err_out_decr_ref: + qlcnic_cancel_idc_work(adapter); qlcnic_clr_all_drv_state(adapter, 0); -err_out_iounmap: - qlcnic_cleanup_pci_map(adapter); - err_out_free_hw: qlcnic_free_adapter_resources(adapter); err_out_free_netdev: free_netdev(netdev); +err_out_iounmap: + qlcnic_cleanup_pci_map(adapter); + +err_out_free_hw_res: + kfree(ahw); + err_out_free_res: pci_release_regions(pdev); @@ -1645,24 +2011,13 @@ err_out_disable_pdev: pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return err; - -err_out_maintenance_mode: - netdev->netdev_ops = &qlcnic_netdev_failed_ops; - SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops); - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "failed to register net device\n"); - goto err_out_decr_ref; - } - pci_set_drvdata(pdev, adapter); - qlcnic_create_diag_entries(adapter); - return 0; } static void qlcnic_remove(struct pci_dev *pdev) { struct qlcnic_adapter *adapter; struct net_device *netdev; + struct qlcnic_hardware_context *ahw; adapter = pci_get_drvdata(pdev); if (adapter == NULL) @@ -1670,10 +2025,17 @@ static void qlcnic_remove(struct pci_dev *pdev) netdev = adapter->netdev; - qlcnic_cancel_fw_work(adapter); + qlcnic_cancel_idc_work(adapter); + ahw = adapter->ahw; unregister_netdev(netdev); + if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_free_mbx_intr(adapter); + 
qlcnic_83xx_register_nic_idc_func(adapter, 0); + cancel_delayed_work_sync(&adapter->idc_aen_work); + } + qlcnic_detach(adapter); if (adapter->npars != NULL) @@ -1689,9 +2051,8 @@ static void qlcnic_remove(struct pci_dev *pdev) qlcnic_free_lb_filters_mem(adapter); qlcnic_teardown_intr(adapter); - kfree(adapter->msix_entries); - qlcnic_remove_diag_entries(adapter); + qlcnic_remove_sysfs(adapter); qlcnic_cleanup_pci_map(adapter); @@ -1702,7 +2063,12 @@ static void qlcnic_remove(struct pci_dev *pdev) pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); + if (adapter->qlcnic_wq) { + destroy_workqueue(adapter->qlcnic_wq); + adapter->qlcnic_wq = NULL; + } qlcnic_free_adapter_resources(adapter); + kfree(ahw); free_netdev(netdev); } static int __qlcnic_shutdown(struct pci_dev *pdev) @@ -1713,7 +2079,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev) netif_device_detach(netdev); - qlcnic_cancel_fw_work(adapter); + qlcnic_cancel_idc_work(adapter); if (netif_running(netdev)) qlcnic_down(adapter, netdev); @@ -1726,7 +2092,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev) retval = pci_save_state(pdev); if (retval) return retval; - if (qlcnic_82xx_check(adapter)) { if (qlcnic_wol_supported(adapter)) { pci_enable_wake(pdev, PCI_D3cold, 1); @@ -1774,7 +2139,7 @@ qlcnic_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); - err = adapter->nic_ops->start_firmware(adapter); + err = qlcnic_start_firmware(adapter); if (err) { dev_err(&pdev->dev, "failed to start firmware\n"); return err; @@ -1797,14 +2162,8 @@ done: static int qlcnic_open(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); int err; - if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) { - netdev_err(netdev, "Device in FAILED state\n"); - return -EIO; - } - netif_carrier_off(netdev); err = qlcnic_attach(adapter); @@ -1832,6 +2191,7 @@ static int qlcnic_close(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); __qlcnic_down(adapter, netdev); + return 0; } @@ -1839,22 +2199,53 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) { void *head; int i; + struct net_device *netdev = adapter->netdev; + u32 filter_size = 0; + u16 act_pci_func = 0; if (adapter->fhash.fmax && adapter->fhash.fhead) return; + act_pci_func = adapter->ahw->act_pci_func; spin_lock_init(&adapter->mac_learn_lock); + spin_lock_init(&adapter->rx_mac_learn_lock); + + if (qlcnic_82xx_check(adapter)) { + filter_size = QLCNIC_LB_MAX_FILTERS; + adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE; + } else { + filter_size = QLC_83XX_LB_MAX_FILTERS; + adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE; + } + + head = kcalloc(adapter->fhash.fbucket_size, + sizeof(struct hlist_head), GFP_ATOMIC); - head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head), - GFP_KERNEL); if (!head) return; - adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS; + adapter->fhash.fmax = (filter_size / act_pci_func); adapter->fhash.fhead = head; - for (i = 0; i < adapter->fhash.fmax; i++) + netdev_info(netdev, "active nic func = %d, mac filter size=%d\n", + act_pci_func, adapter->fhash.fmax); + + for (i = 0; i < adapter->fhash.fbucket_size; i++) INIT_HLIST_HEAD(&adapter->fhash.fhead[i]); + + adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size; + + head = kcalloc(adapter->rx_fhash.fbucket_size, + sizeof(struct hlist_head), GFP_ATOMIC); + + if (!head) + return; + + adapter->rx_fhash.fmax = (filter_size / act_pci_func); + 
adapter->rx_fhash.fhead = head; + + for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) + INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]); } static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) @@ -1864,16 +2255,25 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) adapter->fhash.fhead = NULL; adapter->fhash.fmax = 0; + + if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead) + kfree(adapter->rx_fhash.fhead); + + adapter->rx_fhash.fmax = 0; + adapter->rx_fhash.fhead = NULL; } -static int qlcnic_check_temp(struct qlcnic_adapter *adapter) +int qlcnic_check_temp(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; u32 temp_state, temp_val, temp = 0; int rv = 0; + if (qlcnic_83xx_check(adapter)) + temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP); + if (qlcnic_82xx_check(adapter)) - temp = QLCRD32(adapter, CRB_TEMP_STATE); + temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP); temp_state = qlcnic_get_temp_state(temp); temp_val = qlcnic_get_temp_val(temp); @@ -1933,7 +2333,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) return stats; } -static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter) +irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter) { u32 status; @@ -2009,6 +2409,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data) return IRQ_HANDLED; } +static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data) +{ + struct qlcnic_host_tx_ring *tx_ring = data; + + napi_schedule(&tx_ring->napi); + return IRQ_HANDLED; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void qlcnic_poll_controller(struct net_device *netdev) { @@ -2035,7 +2443,7 @@ qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) val |= encoding << 7; val |= (jiffies - adapter->dev_rst_time) << 8; - QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val); adapter->dev_rst_time = jiffies; } @@ -2050,14 +2458,14 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state) if (qlcnic_api_lock(adapter)) return -EIO; - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); if (state == QLCNIC_DEV_NEED_RESET) QLC_DEV_SET_RST_RDY(val, adapter->portnum); else if (state == QLCNIC_DEV_NEED_QUISCENT) QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); qlcnic_api_unlock(adapter); @@ -2072,9 +2480,9 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) if (qlcnic_api_lock(adapter)) return -EBUSY; - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); qlcnic_api_unlock(adapter); @@ -2089,20 +2497,22 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) if (qlcnic_api_lock(adapter)) goto err; - val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); QLC_DEV_CLR_REF_CNT(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); if (failed) { - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_FAILED); dev_info(&adapter->pdev->dev, "Device state set to Failed. 
Please Reboot\n"); } else if (!(val & 0x11111111)) - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_COLD); - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); qlcnic_api_unlock(adapter); err: @@ -2117,12 +2527,13 @@ static int qlcnic_check_drv_state(struct qlcnic_adapter *adapter) { int act, state, active_mask; + struct qlcnic_hardware_context *ahw = adapter->ahw; - state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); if (adapter->flags & QLCNIC_FW_RESET_OWNER) { - active_mask = (~(1 << (adapter->ahw->pci_func * 4))); + active_mask = (~(1 << (ahw->pci_func * 4))); act = act & active_mask; } @@ -2135,7 +2546,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter) static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter) { - u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER); + u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER); if (val != QLCNIC_DRV_IDC_VER) { dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's" @@ -2159,19 +2570,21 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) if (qlcnic_api_lock(adapter)) return -1; - val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); if (!(val & (1 << (portnum * 4)))) { QLC_DEV_SET_REF_CNT(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); } - prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); QLCDB(adapter, HW, "Device state = %u\n", prev_state); switch (prev_state) { case QLCNIC_DEV_COLD: - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); - QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER, + QLCNIC_DRV_IDC_VER); qlcnic_idc_debug_info(adapter, 0); qlcnic_api_unlock(adapter); return 1; @@ -2182,15 +2595,15 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) return ret; case QLCNIC_DEV_NEED_RESET: - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_SET_RST_RDY(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); break; case QLCNIC_DEV_NEED_QUISCENT: - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_SET_QSCNT_RDY(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); break; case QLCNIC_DEV_FAILED: @@ -2207,7 +2620,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) do { msleep(1000); - prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (prev_state == QLCNIC_DEV_QUISCENT) continue; @@ -2222,9 +2635,9 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) if (qlcnic_api_lock(adapter)) return -1; - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + val = 
QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_CLR_RST_QSCNT(val, portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); ret = qlcnic_check_idc_ver(adapter); qlcnic_api_unlock(adapter); @@ -2243,7 +2656,7 @@ qlcnic_fwinit_work(struct work_struct *work) if (qlcnic_api_lock(adapter)) goto err_ret; - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (dev_state == QLCNIC_DEV_QUISCENT || dev_state == QLCNIC_DEV_NEED_QUISCENT) { qlcnic_api_unlock(adapter); @@ -2272,17 +2685,19 @@ qlcnic_fwinit_work(struct work_struct *work) if (!qlcnic_check_drv_state(adapter)) { skip_ack_check: - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (dev_state == QLCNIC_DEV_NEED_RESET) { - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, - QLCNIC_DEV_INITIALIZING); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); set_bit(__QLCNIC_START_FW, &adapter->state); QLCDB(adapter, DRV, "Restarting fw\n"); qlcnic_idc_debug_info(adapter, 0); - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + val = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DRV_STATE); QLC_DEV_SET_RST_RDY(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + QLC_SHARED_REG_WR32(adapter, + QLCNIC_CRB_DRV_STATE, val); } qlcnic_api_unlock(adapter); @@ -2308,12 +2723,12 @@ skip_ack_check: qlcnic_api_unlock(adapter); wait_npar: - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); switch (dev_state) { case QLCNIC_DEV_READY: - if (!adapter->nic_ops->start_firmware(adapter)) { + if (!qlcnic_start_firmware(adapter)) { qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); adapter->fw_wait_cnt = 0; return; @@ -2350,7 +2765,7 @@ qlcnic_detach_work(struct work_struct *work) } else qlcnic_down(adapter, netdev); - status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); + status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); if (status & QLCNIC_RCODE_FATAL_ERROR) { dev_err(&adapter->pdev->dev, @@ -2401,19 +2816,18 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter) { u32 state; - state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); if (state == QLCNIC_DEV_NPAR_NON_OPER) return; if (qlcnic_api_lock(adapter)) return; - QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_DEV_NPAR_NON_OPER); qlcnic_api_unlock(adapter); } -/*Transit to RESET state from READY state only */ -void -qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) +void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) { u32 state, xg_val = 0, gb_val = 0; @@ -2428,25 +2842,22 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) dev_info(&adapter->pdev->dev, "Pause control frames disabled" " on all ports\n"); adapter->need_fw_reset = 1; + if (qlcnic_api_lock(adapter)) return; - state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) { - netdev_err(adapter->netdev, - "Device is in FAILED state, Please Reboot\n"); - qlcnic_api_unlock(adapter); - return; - } + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (state == QLCNIC_DEV_READY) { - QLCWR32(adapter, 
QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_NEED_RESET); adapter->flags |= QLCNIC_FW_RESET_OWNER; QLCDB(adapter, DRV, "NEED_RESET state set\n"); qlcnic_idc_debug_info(adapter, 0); } - QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_DEV_NPAR_NON_OPER); qlcnic_api_unlock(adapter); } @@ -2457,34 +2868,22 @@ qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) if (qlcnic_api_lock(adapter)) return; - QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_DEV_NPAR_OPER); QLCDB(adapter, DRV, "NPAR operational state set\n"); qlcnic_api_unlock(adapter); } -static void -qlcnic_schedule_work(struct qlcnic_adapter *adapter, - work_func_t func, int delay) +void qlcnic_schedule_work(struct qlcnic_adapter *adapter, + work_func_t func, int delay) { if (test_bit(__QLCNIC_AER, &adapter->state)) return; INIT_DELAYED_WORK(&adapter->fw_work, func); - queue_delayed_work(qlcnic_wq, &adapter->fw_work, - round_jiffies_relative(delay)); -} - -static void -qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter) -{ - while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - msleep(10); - - if (!adapter->fw_work.work.func) - return; - - cancel_delayed_work_sync(&adapter->fw_work); + queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work, + round_jiffies_relative(delay)); } static void @@ -2496,7 +2895,8 @@ qlcnic_attach_work(struct work_struct *work) u32 npar_state; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { - npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + npar_state = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DEV_NPAR_STATE); if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO) qlcnic_clr_all_drv_state(adapter, 0); else if (npar_state != QLCNIC_DEV_NPAR_OPER) @@ -2536,16 +2936,16 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) goto detach; if (adapter->need_fw_reset) - qlcnic_dev_request_reset(adapter); + qlcnic_dev_request_reset(adapter, 0); - state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (state == QLCNIC_DEV_NEED_RESET) { qlcnic_set_npar_non_operational(adapter); adapter->need_fw_reset = 1; } else if (state == QLCNIC_DEV_NEED_QUISCENT) goto detach; - heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); if (heartbeat != adapter->heartbeat) { adapter->heartbeat = heartbeat; adapter->fw_fail_cnt = 0; @@ -2565,25 +2965,25 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) adapter->flags |= QLCNIC_FW_HANG; - qlcnic_dev_request_reset(adapter); + qlcnic_dev_request_reset(adapter, 0); if (qlcnic_auto_fw_reset) clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); dev_err(&adapter->pdev->dev, "firmware hang detected\n"); + peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n" "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" "PEG_NET_4_PC: 0x%x\n", - QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1), - QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2), + peg_status, + QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), 
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); - peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) dev_err(&adapter->pdev->dev, "Firmware aborted with error code 0x00006700. " @@ -2667,17 +3067,39 @@ static int qlcnic_attach_func(struct pci_dev *pdev) if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { adapter->need_fw_reset = 1; set_bit(__QLCNIC_START_FW, &adapter->state); - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); QLCDB(adapter, DRV, "Restarting fw\n"); } qlcnic_api_unlock(adapter); - err = adapter->nic_ops->start_firmware(adapter); + err = qlcnic_start_firmware(adapter); if (err) return err; qlcnic_clr_drv_state(adapter); - qlcnic_setup_intr(adapter); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + err = qlcnic_setup_intr(adapter, 0); + + if (err) { + kfree(adapter->msix_entries); + netdev_err(netdev, "failed to setup interrupt\n"); + return err; + } + + if (qlcnic_83xx_check(adapter)) { + /* register for NIC IDC AEN Events */ + qlcnic_83xx_register_nic_idc_func(adapter, 1); + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "failed to setup mbx interrupt\n"); + qlcnic_clr_all_drv_state(adapter, 1); + clear_bit(__QLCNIC_AER, &adapter->state); + goto done; + } + } if (netif_running(netdev)) { err = qlcnic_attach(adapter); @@ -2719,6 +3141,12 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) qlcnic_down(adapter, netdev); + if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_free_mbx_intr(adapter); + qlcnic_83xx_register_nic_idc_func(adapter, 0); + cancel_delayed_work_sync(&adapter->idc_aen_work); + } + qlcnic_detach(adapter); qlcnic_teardown_intr(adapter); @@ -2738,12 +3166,13 @@ static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) static void qlcnic_io_resume(struct pci_dev *pdev) { + u32 state; struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); pci_cleanup_aer_uncorrect_error_status(pdev); - - if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY && - test_and_clear_bit(__QLCNIC_AER, &adapter->state)) + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER, + &adapter->state)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); } @@ -2776,39 +3205,59 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) return err; } -int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val) +int qlcnic_validate_max_rss(u8 max_hw, u8 val) { - if (!qlcnic_use_msi_x && !qlcnic_use_msi) { - netdev_info(netdev, "no msix or msi support, hence no rss\n"); - return -EINVAL; + u32 max_allowed; + + if (max_hw > QLC_MAX_SDS_RINGS) { + max_hw = QLC_MAX_SDS_RINGS; + pr_info("max rss reset to %d\n", QLC_MAX_SDS_RINGS); } - if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) { - netdev_info(netdev, "rss_ring valid range [2 - %x] in " - " powers of 2\n", max_hw); + max_allowed = rounddown_pow_of_two(min_t(int, max_hw, + num_online_cpus())); + if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) { + pr_info("rss_ring valid range [2 - %x] in powers of 2\n", + max_allowed); return -EINVAL; } return 0; - } -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data) +int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len) { + int err; 
struct net_device *netdev = adapter->netdev; - int err = 0; - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; netif_device_detach(netdev); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); + qlcnic_detach(adapter); + + if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_free_mbx_intr(adapter); + qlcnic_teardown_intr(adapter); + err = qlcnic_setup_intr(adapter, data); + if (err) { + kfree(adapter->msix_entries); + netdev_err(netdev, "failed to setup interrupt\n"); + return err; + } - if (qlcnic_enable_msix(adapter, data)) { - netdev_info(netdev, "failed setting max_rss; rss disabled\n"); - qlcnic_enable_msi_legacy(adapter); + if (qlcnic_83xx_check(adapter)) { + /* register for NIC IDC AEN Events */ + qlcnic_83xx_register_nic_idc_func(adapter, 1); + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "failed to setup mbx interrupt\n"); + goto done; + } } if (netif_running(netdev)) { @@ -2820,6 +3269,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data) goto done; qlcnic_restore_indev_addr(netdev, NETDEV_UP); } + err = len; done: netif_device_attach(netdev); clear_bit(__QLCNIC_RESETTING, &adapter->state); @@ -2858,8 +3308,7 @@ qlcnic_config_indev_addr(struct qlcnic_adapter *adapter, in_dev_put(indev); } -static void -qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) +void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct net_device *dev; @@ -2867,12 +3316,14 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) qlcnic_config_indev_addr(adapter, netdev, event); + rcu_read_lock(); for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { dev = __vlan_find_dev_deep(netdev, vid); if (!dev) continue; qlcnic_config_indev_addr(adapter, dev, event); } + rcu_read_unlock(); } static int qlcnic_netdev_event(struct notifier_block *this, @@ -2940,9 +3391,11 @@ recheck: switch (event) { case NETDEV_UP: qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP); + break; case NETDEV_DOWN: qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN); + break; default: break; @@ -2960,11 +3413,10 @@ static struct notifier_block qlcnic_inetaddr_cb = { .notifier_call = qlcnic_inetaddr_event, }; #else -static void -qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) +void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) { } #endif -static struct pci_error_handlers qlcnic_err_handler = { +static const struct pci_error_handlers qlcnic_err_handler = { .error_detected = qlcnic_io_error_detected, .slot_reset = qlcnic_io_slot_reset, .resume = qlcnic_io_resume, @@ -2990,12 +3442,6 @@ static int __init qlcnic_init_module(void) printk(KERN_INFO "%s\n", qlcnic_driver_string); - qlcnic_wq = create_singlethread_workqueue("qlcnic"); - if (qlcnic_wq == NULL) { - printk(KERN_ERR "qlcnic: cannot create workqueue\n"); - return -ENOMEM; - } - #ifdef CONFIG_INET register_netdevice_notifier(&qlcnic_netdev_cb); register_inetaddr_notifier(&qlcnic_inetaddr_cb); @@ -3007,7 +3453,6 @@ static int __init qlcnic_init_module(void) unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); unregister_netdevice_notifier(&qlcnic_netdev_cb); #endif - destroy_workqueue(qlcnic_wq); } return ret; @@ -3017,14 +3462,12 @@ module_init(qlcnic_init_module); static void __exit qlcnic_exit_module(void) { - pci_unregister_driver(&qlcnic_driver); #ifdef 
CONFIG_INET unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); unregister_netdevice_notifier(&qlcnic_netdev_cb); #endif - destroy_workqueue(qlcnic_wq); } module_exit(qlcnic_exit_module); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 0b8d8625834c..abbd22c814a6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1,8 +1,25 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + #include "qlcnic.h" #include "qlcnic_hdr.h" +#include "qlcnic_83xx_hw.h" +#include "qlcnic_hw.h" #include <net/ip.h> +#define QLC_83XX_MINIDUMP_FLASH 0x520000 +#define QLC_83XX_OCM_INDEX 3 +#define QLC_83XX_PCI_INDEX 0 + +static const u32 qlcnic_ms_read_data[] = { + 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC +}; + #define QLCNIC_DUMP_WCRB BIT_0 #define QLCNIC_DUMP_RWCRB BIT_1 #define QLCNIC_DUMP_ANDCRB BIT_2 @@ -102,16 +119,55 @@ struct __queue { u8 rsvd3[2]; } __packed; +struct __pollrd { + u32 sel_addr; + u32 read_addr; + u32 sel_val; + u16 sel_val_stride; + u16 no_ops; + u32 poll_wait; + u32 poll_mask; + u32 data_size; + u8 rsvd[4]; +} __packed; + +struct __mux2 { + u32 sel_addr1; + u32 sel_addr2; + u32 sel_val1; + u32 sel_val2; + u32 no_ops; + u32 sel_val_mask; + u32 read_addr; + u8 sel_val_stride; + u8 data_size; + u8 rsvd[2]; +} __packed; + +struct __pollrdmwr { + u32 addr1; + u32 addr2; + u32 val1; + u32 val2; + u32 poll_wait; + u32 poll_mask; + u32 mod_mask; + u32 data_size; +} __packed; + struct qlcnic_dump_entry { struct qlcnic_common_entry_hdr hdr; union { - struct __crb crb; - struct __cache cache; - struct __ocm ocm; - struct __mem mem; - struct __mux mux; - struct __queue que; - struct __ctrl ctrl; + struct __crb crb; + struct __cache cache; + struct __ocm ocm; + struct __mem mem; + struct __mux mux; + struct __queue que; + struct __ctrl ctrl; + struct __pollrdmwr pollrdmwr; + struct __mux2 mux2; + struct __pollrd pollrd; } region; } __packed; @@ -131,6 +187,9 @@ enum qlcnic_minidump_opcode { QLCNIC_DUMP_L2_ITAG = 22, QLCNIC_DUMP_L2_DATA = 23, QLCNIC_DUMP_L2_INST = 24, + QLCNIC_DUMP_POLL_RD = 35, + QLCNIC_READ_MUX2 = 36, + QLCNIC_READ_POLLRDMWR = 37, QLCNIC_DUMP_READ_ROM = 71, QLCNIC_DUMP_READ_MEM = 72, QLCNIC_DUMP_READ_CTRL = 98, @@ -144,46 +203,17 @@ struct qlcnic_dump_operations { __le32 *); }; -static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data) -{ - u32 dest; - void __iomem *window_reg; - - dest = addr & 0xFFFF0000; - window_reg = bar0 + QLCNIC_FW_DUMP_REG1; - writel(dest, window_reg); - readl(window_reg); - window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr); - *data = readl(window_reg); -} - -static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data) -{ - u32 dest; - void __iomem *window_reg; - - dest = addr & 0xFFFF0000; - window_reg = bar0 + QLCNIC_FW_DUMP_REG1; - writel(dest, window_reg); - readl(window_reg); - window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr); - writel(data, window_reg); - readl(window_reg); -} - -/* FW dump related functions */ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int i; u32 addr, data; struct __crb *crb = &entry->region.crb; - void __iomem *base = adapter->ahw->pci_base0; addr = crb->addr; for (i = 0; i < crb->no_ops; i++) { - qlcnic_read_dump_reg(addr, base, &data); + data = qlcnic_ind_rd(adapter, addr); 
*buffer++ = cpu_to_le32(addr); *buffer++ = cpu_to_le32(data); addr += crb->stride; @@ -195,7 +225,6 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int i, k, timeout = 0; - void __iomem *base = adapter->ahw->pci_base0; u32 addr, data; u8 no_ops; struct __ctrl *ctr = &entry->region.ctrl; @@ -211,28 +240,28 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, continue; switch (1 << k) { case QLCNIC_DUMP_WCRB: - qlcnic_write_dump_reg(addr, base, ctr->val1); + qlcnic_ind_wr(adapter, addr, ctr->val1); break; case QLCNIC_DUMP_RWCRB: - qlcnic_read_dump_reg(addr, base, &data); - qlcnic_write_dump_reg(addr, base, data); + data = qlcnic_ind_rd(adapter, addr); + qlcnic_ind_wr(adapter, addr, data); break; case QLCNIC_DUMP_ANDCRB: - qlcnic_read_dump_reg(addr, base, &data); - qlcnic_write_dump_reg(addr, base, - data & ctr->val2); + data = qlcnic_ind_rd(adapter, addr); + qlcnic_ind_wr(adapter, addr, + (data & ctr->val2)); break; case QLCNIC_DUMP_ORCRB: - qlcnic_read_dump_reg(addr, base, &data); - qlcnic_write_dump_reg(addr, base, - data | ctr->val3); + data = qlcnic_ind_rd(adapter, addr); + qlcnic_ind_wr(adapter, addr, + (data | ctr->val3)); break; case QLCNIC_DUMP_POLLCRB: while (timeout <= ctr->timeout) { - qlcnic_read_dump_reg(addr, base, &data); + data = qlcnic_ind_rd(adapter, addr); if ((data & ctr->val2) == ctr->val1) break; - msleep(1); + usleep_range(1000, 2000); timeout++; } if (timeout > ctr->timeout) { @@ -244,7 +273,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, case QLCNIC_DUMP_RD_SAVE: if (ctr->index_a) addr = t_hdr->saved_state[ctr->index_a]; - qlcnic_read_dump_reg(addr, base, &data); + data = qlcnic_ind_rd(adapter, addr); t_hdr->saved_state[ctr->index_v] = data; break; case QLCNIC_DUMP_WRT_SAVED: @@ -254,7 +283,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, data = ctr->val1; if (ctr->index_a) addr = t_hdr->saved_state[ctr->index_a]; - qlcnic_write_dump_reg(addr, base, data); + qlcnic_ind_wr(adapter, addr, data); break; case QLCNIC_DUMP_MOD_SAVE_ST: data = t_hdr->saved_state[ctr->index_v]; @@ -283,12 +312,11 @@ static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter, int loop; u32 val, data = 0; struct __mux *mux = &entry->region.mux; - void __iomem *base = adapter->ahw->pci_base0; val = mux->val; for (loop = 0; loop < mux->no_ops; loop++) { - qlcnic_write_dump_reg(mux->addr, base, val); - qlcnic_read_dump_reg(mux->read_addr, base, &data); + qlcnic_ind_wr(adapter, mux->addr, val); + data = qlcnic_ind_rd(adapter, mux->read_addr); *buffer++ = cpu_to_le32(val); *buffer++ = cpu_to_le32(data); val += mux->val_stride; @@ -301,17 +329,16 @@ static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter, { int i, loop; u32 cnt, addr, data, que_id = 0; - void __iomem *base = adapter->ahw->pci_base0; struct __queue *que = &entry->region.que; addr = que->read_addr; cnt = que->read_addr_cnt; for (loop = 0; loop < que->no_ops; loop++) { - qlcnic_write_dump_reg(que->sel_addr, base, que_id); + qlcnic_ind_wr(adapter, que->sel_addr, que_id); addr = que->read_addr; for (i = 0; i < cnt; i++) { - qlcnic_read_dump_reg(addr, base, &data); + data = qlcnic_ind_rd(adapter, addr); *buffer++ = cpu_to_le32(data); addr += que->read_addr_stride; } @@ -343,27 +370,27 @@ static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter, int i, count = 0; u32 fl_addr, size, val, lck_val, addr; struct __mem *rom = &entry->region.mem; - void __iomem *base = adapter->ahw->pci_base0; fl_addr = rom->addr; - size = rom->size/4; 
+ size = rom->size / 4; lock_try: - lck_val = readl(base + QLCNIC_FLASH_SEM2_LK); + lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK); if (!lck_val && count < MAX_CTL_CHECK) { - msleep(10); + usleep_range(10000, 11000); count++; goto lock_try; } - writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID)); + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, + adapter->ahw->pci_func); for (i = 0; i < size; i++) { addr = fl_addr & 0xFFFF0000; - qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr); + qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr); addr = LSW(fl_addr) + FLASH_ROM_DATA; - qlcnic_read_dump_reg(addr, base, &val); + val = qlcnic_ind_rd(adapter, addr); fl_addr += 4; *buffer++ = cpu_to_le32(val); } - readl(base + QLCNIC_FLASH_SEM2_ULK); + QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK); return rom->size; } @@ -372,18 +399,17 @@ static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, { int i; u32 cnt, val, data, addr; - void __iomem *base = adapter->ahw->pci_base0; struct __cache *l1 = &entry->region.cache; val = l1->init_tag_val; for (i = 0; i < l1->no_ops; i++) { - qlcnic_write_dump_reg(l1->addr, base, val); - qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val)); + qlcnic_ind_wr(adapter, l1->addr, val); + qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val)); addr = l1->read_addr; cnt = l1->read_addr_num; while (cnt) { - qlcnic_read_dump_reg(addr, base, &data); + data = qlcnic_ind_rd(adapter, addr); *buffer++ = cpu_to_le32(data); addr += l1->read_addr_stride; cnt--; @@ -399,7 +425,6 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, int i; u32 cnt, val, data, addr; u8 poll_mask, poll_to, time_out = 0; - void __iomem *base = adapter->ahw->pci_base0; struct __cache *l2 = &entry->region.cache; val = l2->init_tag_val; @@ -407,17 +432,17 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, poll_to = MSB(MSW(l2->ctrl_val)); for (i = 0; i < l2->no_ops; i++) { - qlcnic_write_dump_reg(l2->addr, base, val); + qlcnic_ind_wr(adapter, l2->addr, val); if (LSW(l2->ctrl_val)) - qlcnic_write_dump_reg(l2->ctrl_addr, base, - LSW(l2->ctrl_val)); + qlcnic_ind_wr(adapter, l2->ctrl_addr, + LSW(l2->ctrl_val)); if (!poll_mask) goto skip_poll; do { - qlcnic_read_dump_reg(l2->ctrl_addr, base, &data); + data = qlcnic_ind_rd(adapter, l2->ctrl_addr); if (!(data & poll_mask)) break; - msleep(1); + usleep_range(1000, 2000); time_out++; } while (time_out <= poll_to); @@ -431,7 +456,7 @@ skip_poll: addr = l2->read_addr; cnt = l2->read_addr_num; while (cnt) { - qlcnic_read_dump_reg(addr, base, &data); + data = qlcnic_ind_rd(adapter, addr); *buffer++ = cpu_to_le32(data); addr += l2->read_addr_stride; cnt--; @@ -447,7 +472,6 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, u32 addr, data, test, ret = 0; int i, reg_read; struct __mem *mem = &entry->region.mem; - void __iomem *base = adapter->ahw->pci_base0; reg_read = mem->size; addr = mem->addr; @@ -462,13 +486,12 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, mutex_lock(&adapter->ahw->mem_lock); while (reg_read != 0) { - qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr); - qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0); - qlcnic_write_dump_reg(MIU_TEST_CTR, base, - TA_CTL_ENABLE | TA_CTL_START); + qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr); + qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0); + qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE); for (i = 0; i < MAX_CTL_CHECK; i++) { - qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test); + test = 
qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL); if (!(test & TA_CTL_BUSY)) break; } @@ -481,8 +504,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, } } for (i = 0; i < 4; i++) { - qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base, - &data); + data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]); *buffer++ = cpu_to_le32(data); } addr += 16; @@ -501,48 +523,388 @@ static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter, return 0; } -static const struct qlcnic_dump_operations fw_dump_ops[] = { - { QLCNIC_DUMP_NOP, qlcnic_dump_nop }, - { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb }, - { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux }, - { QLCNIC_DUMP_QUEUE, qlcnic_dump_que }, - { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom }, - { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm }, - { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl }, - { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache }, - { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache }, - { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom }, - { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory }, - { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl }, - { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop }, - { QLCNIC_DUMP_RDEND, qlcnic_dump_nop }, -}; - -/* Walk the template and collect dump for each entry in the dump template */ -static int -qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry, - u32 size) +static int qlcnic_valid_dump_entry(struct device *dev, + struct qlcnic_dump_entry *entry, u32 size) { int ret = 1; if (size != entry->hdr.cap_size) { - dev_info(dev, - "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", - entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size); - dev_info(dev, "Aborting further dump capture\n"); + dev_err(dev, + "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", + entry->hdr.type, entry->hdr.mask, size, + entry->hdr.cap_size); ret = 0; } return ret; } +static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, + __le32 *buffer) +{ + struct __pollrdmwr *poll = &entry->region.pollrdmwr; + u32 data, wait_count, poll_wait, temp; + + poll_wait = poll->poll_wait; + + qlcnic_ind_wr(adapter, poll->addr1, poll->val1); + wait_count = 0; + + while (wait_count < poll_wait) { + data = qlcnic_ind_rd(adapter, poll->addr1); + if ((data & poll->poll_mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll_wait) { + dev_err(&adapter->pdev->dev, + "Timeout exceeded in %s, aborting dump\n", + __func__); + return 0; + } + + data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask; + qlcnic_ind_wr(adapter, poll->addr2, data); + qlcnic_ind_wr(adapter, poll->addr1, poll->val2); + wait_count = 0; + + while (wait_count < poll_wait) { + temp = qlcnic_ind_rd(adapter, poll->addr1); + if ((temp & poll->poll_mask) != 0) + break; + wait_count++; + } + + *buffer++ = cpu_to_le32(poll->addr2); + *buffer++ = cpu_to_le32(data); + + return 2 * sizeof(u32); + +} + +static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + struct __pollrd *pollrd = &entry->region.pollrd; + u32 data, wait_count, poll_wait, sel_val; + int i; + + poll_wait = pollrd->poll_wait; + sel_val = pollrd->sel_val; + + for (i = 0; i < pollrd->no_ops; i++) { + qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val); 
+ wait_count = 0; + while (wait_count < poll_wait) { + data = qlcnic_ind_rd(adapter, pollrd->sel_addr); + if ((data & pollrd->poll_mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll_wait) { + dev_err(&adapter->pdev->dev, + "Timeout exceeded in %s, aborting dump\n", + __func__); + return 0; + } + + data = qlcnic_ind_rd(adapter, pollrd->read_addr); + *buffer++ = cpu_to_le32(sel_val); + *buffer++ = cpu_to_le32(data); + sel_val += pollrd->sel_val_stride; + } + return pollrd->no_ops * (2 * sizeof(u32)); +} + +static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + struct __mux2 *mux2 = &entry->region.mux2; + u32 data; + u32 t_sel_val, sel_val1, sel_val2; + int i; + + sel_val1 = mux2->sel_val1; + sel_val2 = mux2->sel_val2; + + for (i = 0; i < mux2->no_ops; i++) { + qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1); + t_sel_val = sel_val1 & mux2->sel_val_mask; + qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val); + data = qlcnic_ind_rd(adapter, mux2->read_addr); + *buffer++ = cpu_to_le32(t_sel_val); + *buffer++ = cpu_to_le32(data); + qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2); + t_sel_val = sel_val2 & mux2->sel_val_mask; + qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val); + data = qlcnic_ind_rd(adapter, mux2->read_addr); + *buffer++ = cpu_to_le32(t_sel_val); + *buffer++ = cpu_to_le32(data); + sel_val1 += mux2->sel_val_stride; + sel_val2 += mux2->sel_val_stride; + } + + return mux2->no_ops * (4 * sizeof(u32)); +} + +static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + u32 fl_addr, size; + struct __mem *rom = &entry->region.mem; + + fl_addr = rom->addr; + size = rom->size / 4; + + if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr, + (u8 *)buffer, size)) + return rom->size; + + return 0; +} + +static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = { + {QLCNIC_DUMP_NOP, qlcnic_dump_nop}, + {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb}, + {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux}, + {QLCNIC_DUMP_QUEUE, qlcnic_dump_que}, + {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom}, + {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm}, + {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom}, + {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory}, + {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop}, + {QLCNIC_DUMP_RDEND, qlcnic_dump_nop}, +}; + +static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = { + {QLCNIC_DUMP_NOP, qlcnic_dump_nop}, + {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb}, + {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux}, + {QLCNIC_DUMP_QUEUE, qlcnic_dump_que}, + {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom}, + {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm}, + {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache}, + 
{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd}, + {QLCNIC_READ_MUX2, qlcnic_read_mux2}, + {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr}, + {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom}, + {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory}, + {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop}, + {QLCNIC_DUMP_RDEND, qlcnic_dump_nop}, +}; + +static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size) +{ + uint64_t sum = 0; + int count = temp_size / sizeof(uint32_t); + while (count-- > 0) + sum += *temp_buffer++; + while (sum >> 32) + sum = (sum & 0xFFFFFFFF) + (sum >> 32); + return ~sum; +} + +static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter, + u8 *buffer, u32 size) +{ + int ret = 0; + + if (qlcnic_82xx_check(adapter)) + return -EIO; + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + ret = qlcnic_83xx_lockless_flash_read32(adapter, + QLC_83XX_MINIDUMP_FLASH, + buffer, size / sizeof(u32)); + + qlcnic_83xx_unlock_flash(adapter); + + return ret; +} + +static int +qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_dump_template_hdr tmp_hdr; + u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32); + int ret = 0; + + if (qlcnic_82xx_check(adapter)) + return -EIO; + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + ret = qlcnic_83xx_lockless_flash_read32(adapter, + QLC_83XX_MINIDUMP_FLASH, + (u8 *)&tmp_hdr, size); + + qlcnic_83xx_unlock_flash(adapter); + + cmd->rsp.arg[2] = tmp_hdr.size; + cmd->rsp.arg[3] = tmp_hdr.version; + + return ret; +} + +static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter, + u32 *version, u32 *temp_size, + u8 *use_flash_temp) +{ + int err = 0; + struct qlcnic_cmd_args cmd; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE)) + return -ENOMEM; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) { + if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) { + qlcnic_free_mbx_args(&cmd); + return -EIO; + } + *use_flash_temp = 1; + } + + *temp_size = cmd.rsp.arg[2]; + *version = cmd.rsp.arg[3]; + qlcnic_free_mbx_args(&cmd); + + if (!(*temp_size)) + return -EIO; + + return 0; +} + +static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter, + u32 *buffer, u32 temp_size) +{ + int err = 0, i; + void *tmp_addr; + __le32 *tmp_buf; + struct qlcnic_cmd_args cmd; + dma_addr_t tmp_addr_t = 0; + + tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, + &tmp_addr_t, GFP_KERNEL); + if (!tmp_addr) { + dev_err(&adapter->pdev->dev, + "Can't get memory for FW dump template\n"); + return -ENOMEM; + } + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) { + err = -ENOMEM; + goto free_mem; + } + + cmd.req.arg[1] = LSD(tmp_addr_t); + cmd.req.arg[2] = MSD(tmp_addr_t); + cmd.req.arg[3] = temp_size; + err = qlcnic_issue_cmd(adapter, &cmd); + + tmp_buf = tmp_addr; + if (err == QLCNIC_RCODE_SUCCESS) { + for (i = 0; i < temp_size / sizeof(u32); i++) + *buffer++ = __le32_to_cpu(*tmp_buf++); + } + + qlcnic_free_mbx_args(&cmd); + +free_mem: + dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); + + return err; +} + +int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) +{ + int err; + u32 temp_size = 0; + u32 version, csum, *tmp_buf; + struct qlcnic_hardware_context *ahw; + struct qlcnic_dump_template_hdr *tmpl_hdr; + u8 use_flash_temp = 0; + + ahw = adapter->ahw; + 
+ err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size, + &use_flash_temp); + if (err) { + dev_err(&adapter->pdev->dev, + "Can't get template size %d\n", err); + return -EIO; + } + + ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); + if (!ahw->fw_dump.tmpl_hdr) + return -ENOMEM; + + tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr; + if (use_flash_temp) + goto flash_temp; + + err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size); + + if (err) { +flash_temp: + err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf, + temp_size); + + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to get minidump template header %d\n", + err); + vfree(ahw->fw_dump.tmpl_hdr); + ahw->fw_dump.tmpl_hdr = NULL; + return -EIO; + } + } + + csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size); + + if (csum) { + dev_err(&adapter->pdev->dev, + "Template header checksum validation failed\n"); + vfree(ahw->fw_dump.tmpl_hdr); + ahw->fw_dump.tmpl_hdr = NULL; + return -EIO; + } + + tmpl_hdr = ahw->fw_dump.tmpl_hdr; + tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; + ahw->fw_dump.enable = 1; + + return 0; +} + int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { __le32 *buffer; + u32 ocm_window; char mesg[64]; char *msg[] = {mesg, NULL}; int i, k, ops_cnt, ops_index, dump_size = 0; @@ -550,12 +912,23 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) struct qlcnic_dump_entry *entry; struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; + static const struct qlcnic_dump_operations *fw_dump_ops; + struct qlcnic_hardware_context *ahw; + + ahw = adapter->ahw; + + if (!fw_dump->enable) { + dev_info(&adapter->pdev->dev, "Dump not enabled\n"); + return -EIO; + } if (fw_dump->clr) { dev_info(&adapter->pdev->dev, "Previous dump not cleared, not capturing dump\n"); return -EIO; } + + netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n"); /* Calculate the size for dump data area only */ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) if (i & tmpl_hdr->drv_cap_mask) @@ -564,20 +937,27 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) return -EIO; fw_dump->data = vzalloc(dump_size); - if (!fw_dump->data) { - dev_info(&adapter->pdev->dev, - "Unable to allocate (%d KB) for fw dump\n", - dump_size / 1024); + if (!fw_dump->data) return -ENOMEM; - } + buffer = fw_dump->data; fw_dump->size = dump_size; no_entries = tmpl_hdr->num_entries; - ops_cnt = ARRAY_SIZE(fw_dump_ops); entry_offset = tmpl_hdr->offset; tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; tmpl_hdr->sys_info[1] = adapter->fw_version; + if (qlcnic_82xx_check(adapter)) { + ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); + fw_dump_ops = qlcnic_fw_dump_ops; + } else { + ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops); + fw_dump_ops = qlcnic_83xx_fw_dump_ops; + ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func]; + tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window; + tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func; + } + for (i = 0; i < no_entries; i++) { entry = (void *)tmpl_hdr + entry_offset; if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) { @@ -585,6 +965,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) entry_offset += entry->hdr.offset; continue; } + /* Find the handler for this entry */ ops_index = 0; while (ops_index < ops_cnt) { @@ -592,16 +973,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) break; ops_index++; } + if (ops_index == ops_cnt) { dev_info(&adapter->pdev->dev, "Invalid entry type %d, exiting dump\n", 
entry->hdr.type); goto error; } + /* Collect dump for this entry */ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); - if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, - dump)) + if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump)) entry->hdr.flags |= QLCNIC_DUMP_SKIP; buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; @@ -616,8 +998,8 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) fw_dump->clr = 1; snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); - dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n", - fw_dump->size); + dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n", + adapter->netdev->name, fw_dump->size); /* Send a udev event to notify availability of FW dump */ kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg); return 0; @@ -626,3 +1008,21 @@ error: vfree(fw_dump->data); return -EINVAL; } + +void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) +{ + u32 prev_version, current_version; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; + struct pci_dev *pdev = adapter->pdev; + + prev_version = adapter->fw_version; + current_version = qlcnic_83xx_get_fw_version(adapter); + + if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { + if (fw_dump->tmpl_hdr) + vfree(fw_dump->tmpl_hdr); + if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) + dev_info(&pdev->dev, "Supports FW dump capability\n"); + } +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 341d37c867ff..987fb6f8adc3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1,8 +1,16 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include "qlcnic.h" +#include "qlcnic_hw.h" #include <linux/swab.h> #include <linux/dma-mapping.h> @@ -13,6 +21,10 @@ #include <linux/aer.h> #include <linux/log2.h> +#include <linux/sysfs.h> + +#define QLC_STATUS_UNSUPPORTED_CMD -2 + int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { return -EOPNOTSUPP; @@ -40,7 +52,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev, if (strict_strtoul(buf, 2, &new)) goto err_out; - if (!adapter->nic_ops->config_bridged_mode(adapter, !!new)) + if (!qlcnic_config_bridged_mode(adapter, !!new)) ret = len; err_out: @@ -80,9 +92,7 @@ static ssize_t qlcnic_show_diag_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", - !!(adapter->flags & QLCNIC_DIAG_ENABLED)); + return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED)); } static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, @@ -111,10 +121,11 @@ static ssize_t qlcnic_store_beacon(struct device *dev, const char *buf, size_t len) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - int max_sds_rings = adapter->max_sds_rings; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err, max_sds_rings = adapter->max_sds_rings; u16 beacon; u8 b_state, b_rate; - int err; + unsigned long h_beacon; if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { dev_warn(dev, @@ -122,6 +133,41 @@ static ssize_t qlcnic_store_beacon(struct device *dev, return -EOPNOTSUPP; } + if (qlcnic_83xx_check(adapter) && + !test_bit(__QLCNIC_RESETTING, &adapter->state)) { + if (kstrtoul(buf, 2, &h_beacon)) + return -EINVAL; + + if (ahw->beacon_state == h_beacon) + return len; + + rtnl_lock(); + if (!ahw->beacon_state) { + if (test_and_set_bit(__QLCNIC_LED_ENABLE, + &adapter->state)) { + rtnl_unlock(); + return -EBUSY; + } + } + if (h_beacon) { + err = qlcnic_83xx_config_led(adapter, 1, h_beacon); + if (err) + goto beacon_err; + } else { + err = qlcnic_83xx_config_led(adapter, 0, !h_beacon); + if (err) + goto beacon_err; + } + /* set the current beacon state */ + ahw->beacon_state = h_beacon; +beacon_err: + if (!ahw->beacon_state) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + rtnl_unlock(); + return len; + } + if (len != sizeof(u16)) return QL_STATUS_INVALID_PARAM; @@ -154,11 +200,10 @@ static ssize_t qlcnic_store_beacon(struct device *dev, } err = qlcnic_config_led(adapter, b_state, b_rate); - - if (!err) { + if (!err) err = len; - adapter->ahw->beacon_state = b_state; - } + else + ahw->beacon_state = b_state; if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(adapter->netdev, max_sds_rings); @@ -207,21 +252,13 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, { struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; int ret; ret = qlcnic_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; + qlcnic_read_crb(adapter, buf, offset, size); - if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { - qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); - memcpy(buf, &qmdata, size); - } else { - data = QLCRD32(adapter, offset); - memcpy(buf, &data, size); - } return size; } @@ -231,21 +268,13 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, { struct device 
*dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; int ret; ret = qlcnic_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; - if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { - memcpy(&qmdata, buf, size); - qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); - } else { - memcpy(&data, buf, size); - QLCWR32(adapter, offset, data); - } + qlcnic_write_crb(adapter, buf, offset, size); return size; } @@ -303,33 +332,44 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, return size; } +static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) +{ + int i; + for (i = 0; i < adapter->ahw->act_pci_func; i++) { + if (adapter->npars[i].pci_func == pci_func) + return i; + } + + return -1; +} + static int validate_pm_config(struct qlcnic_adapter *adapter, struct qlcnic_pm_func_cfg *pm_cfg, int count) { - u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func; - int i; + u8 src_pci_func, s_esw_id, d_esw_id; + u8 dest_pci_func; + int i, src_index, dest_index; for (i = 0; i < count; i++) { src_pci_func = pm_cfg[i].pci_func; dest_pci_func = pm_cfg[i].dest_npar; - if (src_pci_func >= QLCNIC_MAX_PCI_FUNC || - dest_pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; + src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func); - if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC) + if (src_index < 0) return QL_STATUS_INVALID_PARAM; - if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC) + dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func); + if (dest_index < 0) return QL_STATUS_INVALID_PARAM; - s_esw_id = adapter->npars[src_pci_func].phy_port; - d_esw_id = adapter->npars[dest_pci_func].phy_port; + s_esw_id = adapter->npars[src_index].phy_port; + d_esw_id = adapter->npars[dest_index].phy_port; if (s_esw_id != d_esw_id) return QL_STATUS_INVALID_PARAM; } - return 0; + return 0; } static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, @@ -342,7 +382,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pm_func_cfg *pm_cfg; u32 id, action, pci_func; - int count, rem, i, ret; + int count, rem, i, ret, index; count = size / sizeof(struct qlcnic_pm_func_cfg); rem = size % sizeof(struct qlcnic_pm_func_cfg); @@ -350,26 +390,32 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, return QL_STATUS_INVALID_PARAM; pm_cfg = (struct qlcnic_pm_func_cfg *)buf; - ret = validate_pm_config(adapter, pm_cfg, count); + if (ret) return ret; for (i = 0; i < count; i++) { pci_func = pm_cfg[i].pci_func; action = !!pm_cfg[i].action; - id = adapter->npars[pci_func].phy_port; - ret = qlcnic_config_port_mirroring(adapter, id, action, - pci_func); + index = qlcnic_is_valid_nic_func(adapter, pci_func); + if (index < 0) + return QL_STATUS_INVALID_PARAM; + + id = adapter->npars[index].phy_port; + ret = qlcnic_config_port_mirroring(adapter, id, + action, pci_func); if (ret) return ret; } for (i = 0; i < count; i++) { pci_func = pm_cfg[i].pci_func; - id = adapter->npars[pci_func].phy_port; - adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action; - adapter->npars[pci_func].dest_npar = id; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + id = adapter->npars[index].phy_port; + adapter->npars[index].enable_pm = !!pm_cfg[i].action; + adapter->npars[index].dest_npar = id; } + return size; } @@ -383,16 +429,19 @@ static ssize_t 
qlcnic_sysfs_read_pm_config(struct file *filp, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC]; int i; + u8 pci_func; if (size != sizeof(pm_cfg)) return QL_STATUS_INVALID_PARAM; - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - pm_cfg[i].action = adapter->npars[i].enable_pm; - pm_cfg[i].dest_npar = 0; - pm_cfg[i].pci_func = i; + memset(&pm_cfg, 0, + sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC); + + for (i = 0; i < adapter->ahw->act_pci_func; i++) { + pci_func = adapter->npars[i].pci_func; + pm_cfg[pci_func].action = adapter->npars[i].enable_pm; + pm_cfg[pci_func].dest_npar = 0; + pm_cfg[pci_func].pci_func = i; } memcpy(buf, &pm_cfg, size); @@ -404,24 +453,33 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, { u32 op_mode; u8 pci_func; - int i; + int i, ret; - op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE); + if (qlcnic_82xx_check(adapter)) + op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE); + else + op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); for (i = 0; i < count; i++) { pci_func = esw_cfg[i].pci_func; if (pci_func >= QLCNIC_MAX_PCI_FUNC) return QL_STATUS_INVALID_PARAM; - if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) { - if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) + if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) + if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) return QL_STATUS_INVALID_PARAM; - } switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: - if (QLC_DEV_GET_DRV(op_mode, pci_func) != - QLCNIC_NON_PRIV_FUNC) { + if (qlcnic_82xx_check(adapter)) { + ret = QLC_DEV_GET_DRV(op_mode, pci_func); + } else { + ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, + pci_func); + esw_cfg[i].offload_flags = 0; + } + + if (ret != QLCNIC_NON_PRIV_FUNC) { if (esw_cfg[i].mac_anti_spoof != 0) return QL_STATUS_INVALID_PARAM; if (esw_cfg[i].mac_override != 1) @@ -444,6 +502,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, return QL_STATUS_INVALID_PARAM; } } + return 0; } @@ -458,7 +517,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, struct qlcnic_esw_func_cfg *esw_cfg; struct qlcnic_npar_info *npar; int count, rem, i, ret; - u8 pci_func, op_mode = 0; + int index; + u8 op_mode = 0, pci_func; count = size / sizeof(struct qlcnic_esw_func_cfg); rem = size % sizeof(struct qlcnic_esw_func_cfg); @@ -471,10 +531,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, return ret; for (i = 0; i < count; i++) { - if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) { + if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) return QL_STATUS_INVALID_PARAM; - } if (adapter->ahw->pci_func != esw_cfg[i].pci_func) continue; @@ -503,7 +562,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, for (i = 0; i < count; i++) { pci_func = esw_cfg[i].pci_func; - npar = &adapter->npars[pci_func]; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + npar = &adapter->npars[index]; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: npar->promisc_mode = esw_cfg[i].promisc_mode; @@ -533,18 +593,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; - u8 i; + u8 i, pci_func; if (size != sizeof(esw_cfg)) return QL_STATUS_INVALID_PARAM; - for 
(i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - esw_cfg[i].pci_func = i; - if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i])) + memset(&esw_cfg, 0, + sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC); + + for (i = 0; i < adapter->ahw->act_pci_func; i++) { + pci_func = adapter->npars[i].pci_func; + esw_cfg[pci_func].pci_func = pci_func; + if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) return QL_STATUS_INVALID_PARAM; } + memcpy(buf, &esw_cfg, size); return size; @@ -558,10 +621,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter, for (i = 0; i < count; i++) { pci_func = np_cfg[i].pci_func; - if (pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) + if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) return QL_STATUS_INVALID_PARAM; if (!IS_VALID_BW(np_cfg[i].min_bw) || @@ -581,7 +641,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_info nic_info; struct qlcnic_npar_func_cfg *np_cfg; - int i, count, rem, ret; + int i, count, rem, ret, index; u8 pci_func; count = size / sizeof(struct qlcnic_npar_func_cfg); @@ -594,8 +654,10 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, if (ret) return ret; - for (i = 0; i < count ; i++) { + for (i = 0; i < count; i++) { pci_func = np_cfg[i].pci_func; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); if (ret) return ret; @@ -605,12 +667,12 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, ret = qlcnic_set_nic_info(adapter, &nic_info); if (ret) return ret; - adapter->npars[i].min_bw = nic_info.min_tx_bw; - adapter->npars[i].max_bw = nic_info.max_tx_bw; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + adapter->npars[index].min_bw = nic_info.min_tx_bw; + adapter->npars[index].max_bw = nic_info.max_tx_bw; } return size; - } static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, @@ -628,8 +690,12 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, if (size != sizeof(np_cfg)) return QL_STATUS_INVALID_PARAM; + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + memset(&np_cfg, 0, + sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC); + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + if (qlcnic_is_valid_nic_func(adapter, i) < 0) continue; ret = qlcnic_get_nic_info(adapter, &nic_info, i); if (ret) @@ -644,6 +710,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, np_cfg[i].max_tx_queues = nic_info.max_tx_ques; np_cfg[i].max_rx_queues = nic_info.max_rx_ques; } + memcpy(buf, &np_cfg, size); return size; } @@ -659,6 +726,9 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, struct qlcnic_esw_statistics port_stats; int ret; + if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + if (size != sizeof(struct qlcnic_esw_statistics)) return QL_STATUS_INVALID_PARAM; @@ -691,6 +761,9 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, struct qlcnic_esw_statistics esw_stats; int ret; + if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + if (size != sizeof(struct qlcnic_esw_statistics)) return QL_STATUS_INVALID_PARAM; @@ -722,6 +795,9 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; 
+ if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + if (offset >= QLCNIC_NIU_MAX_XG_PORTS) return QL_STATUS_INVALID_PARAM; @@ -744,10 +820,14 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, char *buf, loff_t offset, size_t size) { + struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; + if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + if (offset >= QLCNIC_MAX_PCI_FUNC) return QL_STATUS_INVALID_PARAM; @@ -789,7 +869,10 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, return ret; } - for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { + memset(&pci_cfg, 0, + sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC); + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { pci_cfg[i].pci_func = pci_info[i].id; pci_cfg[i].func_type = pci_info[i].type; pci_cfg[i].port_num = pci_info[i].default_port; @@ -797,6 +880,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, pci_cfg[i].max_bw = pci_info[i].tx_max_bw; memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); } + memcpy(buf, &pci_cfg, size); kfree(pci_info); return size; @@ -897,7 +981,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; - u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); if (device_create_bin_file(dev, &bin_attr_port_stats)) dev_info(dev, "failed to create port stats sysfs entry"); @@ -911,9 +994,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); - if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) - return; - if (device_create_bin_file(dev, &bin_attr_pci_config)) dev_info(dev, "failed to create pci config sysfs entry"); if (device_create_file(dev, &dev_attr_beacon)) @@ -936,7 +1016,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; - u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); device_remove_bin_file(dev, &bin_attr_port_stats); @@ -945,8 +1024,6 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) device_remove_file(dev, &dev_attr_diag_mode); device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); - if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) - return; device_remove_bin_file(dev, &bin_attr_pci_config); device_remove_file(dev, &dev_attr_beacon); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) @@ -958,3 +1035,23 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) device_remove_bin_file(dev, &bin_attr_pm_config); device_remove_bin_file(dev, &bin_attr_esw_stats); } + +void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter) +{ + qlcnic_create_diag_entries(adapter); +} + +void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter) +{ + qlcnic_remove_diag_entries(adapter); +} + +void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter) +{ + qlcnic_create_diag_entries(adapter); +} + +void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter) +{ + qlcnic_remove_diag_entries(adapter); +} diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 3e73742024b0..b13ab544a7eb 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ 
b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2920,14 +2920,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, /* * Allocate small buffer queue control blocks. */ - rx_ring->sbq = - kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->sbq == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Small buffer queue control block allocation failed.\n"); + rx_ring->sbq = kmalloc_array(rx_ring->sbq_len, + sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->sbq == NULL) goto err_mem; - } ql_init_sbq_ring(qdev, rx_ring); } @@ -2948,14 +2945,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev, /* * Allocate large buffer queue control blocks. */ - rx_ring->lbq = - kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->lbq == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Large buffer queue control block allocation failed.\n"); + rx_ring->lbq = kmalloc_array(rx_ring->lbq_len, + sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->lbq == NULL) goto err_mem; - } ql_init_lbq_ring(qdev, rx_ring); } @@ -4572,7 +4566,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, qdev->mpi_coredump = vmalloc(sizeof(struct ql_mpi_coredump)); if (qdev->mpi_coredump == NULL) { - dev_err(&pdev->dev, "Coredump alloc failed.\n"); err = -ENOMEM; goto err_out2; } @@ -4586,7 +4579,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, goto err_out2; } - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); /* Keep local copy of current mac address. */ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); diff --git a/drivers/net/ethernet/racal/Kconfig b/drivers/net/ethernet/racal/Kconfig deleted file mode 100644 index 01969e0a9c68..000000000000 --- a/drivers/net/ethernet/racal/Kconfig +++ /dev/null @@ -1,33 +0,0 @@ -# -# Racal-Interlan device configuration -# - -config NET_VENDOR_RACAL - bool "Racal-Interlan (Micom) NI devices" - default y - depends on ISA - ---help--- - If you have a network (Ethernet) card belonging to this class, such - as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO, - available from <http://www.tldp.org/docs.html#howto>. - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about NI cards. If you say Y, you will be asked for - your specific card in the following questions. - -if NET_VENDOR_RACAL - -config NI5010 - tristate "NI5010 support (EXPERIMENTAL)" - depends on ISA && EXPERIMENTAL && BROKEN_ON_SMP - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. Note that this is still - experimental code. - - To compile this driver as a module, choose M here. The module - will be called ni5010. - -endif # NET_VENDOR_RACAL diff --git a/drivers/net/ethernet/racal/Makefile b/drivers/net/ethernet/racal/Makefile deleted file mode 100644 index 1e210ca1d78b..000000000000 --- a/drivers/net/ethernet/racal/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Racal-Interlan network device drivers. -# - -obj-$(CONFIG_NI5010) += ni5010.o diff --git a/drivers/net/ethernet/racal/ni5010.c b/drivers/net/ethernet/racal/ni5010.c deleted file mode 100644 index 807982220050..000000000000 --- a/drivers/net/ethernet/racal/ni5010.c +++ /dev/null @@ -1,771 +0,0 @@ -/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard. 
- * - * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * The authors may be reached as: - * janpascal@vanbest.org andi@lisas.de - * - * Sources: - * Donald Becker's "skeleton.c" - * Crynwr ni5010 packet driver - * - * Changes: - * v0.0: First test version - * v0.1: First working version - * v0.2: - * v0.3->v0.90: Now demand setting io and irq when loading as module - * 970430 v0.91: modified for Linux 2.1.14 - * v0.92: Implemented Andreas' (better) NI5010 probe - * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB) - * 970623 v1.00: First kernel version (AM) - * 970814 v1.01: Added detection of onboard receive buffer size (AM) - * 060611 v1.02: slight cleanup: email addresses, driver modernization. - * Bugs: - * - not SMP-safe (no locking of I/O accesses) - * - Note that you have to patch ifconfig for the new /proc/net/dev - * format. It gives incorrect stats otherwise. - * - * To do: - * Fix all bugs :-) - * Move some stuff to chipset_init() - * Handle xmt errors other than collisions - * Complete merge with Andreas' driver - * Implement ring buffers (Is this useful? You can't squeeze - * too many packet in a 2k buffer!) - * Implement DMA (Again, is this useful? Some docs say DMA is - * slower than programmed I/O) - * - * Compile with: - * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ \ - * -DMODULE -c ni5010.c - * - * Insert with e.g.: - * insmod ni5010.ko io=0x300 irq=5 - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/bitops.h> -#include <asm/io.h> -#include <asm/dma.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> - -#include "ni5010.h" - -static const char boardname[] = "NI5010"; -static char version[] __initdata = - "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n"; - -/* bufsize_rcv == 0 means autoprobing */ -static unsigned int bufsize_rcv; - -#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */ -#undef JUMPERED_DMA /* No DMA used */ -#undef FULL_IODETECT /* Only detect in portlist */ - -#ifndef FULL_IODETECT -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int ports[] __initdata = - { 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0 }; -#endif - -/* Use 0 for production, 1 for verification, >2 for debug */ -#ifndef NI5010_DEBUG -#define NI5010_DEBUG 0 -#endif - -/* Information that needs to be kept for each board. */ -struct ni5010_local { - int o_pkt_size; - spinlock_t lock; -}; - -/* Index to functions, as function prototypes. 
*/ - -static int ni5010_probe1(struct net_device *dev, int ioaddr); -static int ni5010_open(struct net_device *dev); -static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t ni5010_interrupt(int irq, void *dev_id); -static void ni5010_rx(struct net_device *dev); -static void ni5010_timeout(struct net_device *dev); -static int ni5010_close(struct net_device *dev); -static void ni5010_set_multicast_list(struct net_device *dev); -static void reset_receiver(struct net_device *dev); - -static int process_xmt_interrupt(struct net_device *dev); -#define tx_done(dev) 1 -static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad); -static void chipset_init(struct net_device *dev, int startp); -static void dump_packet(void *buf, int len); -static void ni5010_show_registers(struct net_device *dev); - -static int io; -static int irq; - -struct net_device * __init ni5010_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct ni5010_local)); - int *port; - int err = 0; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - } - - PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name)); - - if (io > 0x1ff) { /* Check a single specified location. */ - err = ni5010_probe1(dev, io); - } else if (io != 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { -#ifdef FULL_IODETECT - for (io=0x200; io<0x400 && ni5010_probe1(dev, io) ; io+=0x20) - ; - if (io == 0x400) - err = -ENODEV; - -#else - for (port = ports; *port && ni5010_probe1(dev, *port); port++) - ; - if (!*port) - err = -ENODEV; -#endif /* FULL_IODETECT */ - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - release_region(dev->base_addr, NI5010_IO_EXTENT); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static inline int rd_port(int ioaddr) -{ - inb(IE_RBUF); - return inb(IE_SAPROM); -} - -static void __init trigger_irq(int ioaddr) -{ - outb(0x00, EDLC_RESET); /* Clear EDLC hold RESET state */ - outb(0x00, IE_RESET); /* Board reset */ - outb(0x00, EDLC_XMASK); /* Disable all Xmt interrupts */ - outb(0x00, EDLC_RMASK); /* Disable all Rcv interrupt */ - outb(0xff, EDLC_XCLR); /* Clear all pending Xmt interrupts */ - outb(0xff, EDLC_RCLR); /* Clear all pending Rcv interrupts */ - /* - * Transmit packet mode: Ignore parity, Power xcvr, - * Enable loopback - */ - outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE); - outb(RMD_BROADCAST, EDLC_RMODE); /* Receive normal&broadcast */ - outb(XM_ALL, EDLC_XMASK); /* Enable all Xmt interrupts */ - udelay(50); /* FIXME: Necessary? */ - outb(MM_EN_XMT|MM_MUX, IE_MMODE); /* Start transmission */ -} - -static const struct net_device_ops ni5010_netdev_ops = { - .ndo_open = ni5010_open, - .ndo_stop = ni5010_close, - .ndo_start_xmit = ni5010_send_packet, - .ndo_set_rx_mode = ni5010_set_multicast_list, - .ndo_tx_timeout = ni5010_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -}; - -/* - * This is the real probe routine. Linux has a history of friendly device - * probes on the ISA bus. A good device probes avoids doing writes, and - * verifies that the correct device exists and functions. 
- */ - -static int __init ni5010_probe1(struct net_device *dev, int ioaddr) -{ - static unsigned version_printed; - struct ni5010_local *lp; - int i; - unsigned int data = 0; - int boguscount = 40; - int err = -ENODEV; - - dev->base_addr = ioaddr; - dev->irq = irq; - - if (!request_region(ioaddr, NI5010_IO_EXTENT, boardname)) - return -EBUSY; - - /* - * This is no "official" probe method, I've rather tested which - * probe works best with my seven NI5010 cards - * (they have very different serial numbers) - * Suggestions or failure reports are very, very welcome ! - * But I think it is a relatively good probe method - * since it doesn't use any "outb" - * It should be nearly 100% reliable ! - * well-known WARNING: this probe method (like many others) - * will hang the system if a NE2000 card region is probed ! - * - * - Andreas - */ - - PRINTK2((KERN_DEBUG "%s: entering ni5010_probe1(%#3x)\n", - dev->name, ioaddr)); - - if (inb(ioaddr+0) == 0xff) - goto out; - - while ( (rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr) & - rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr)) != 0xff) - { - if (boguscount-- == 0) - goto out; - } - - PRINTK2((KERN_DEBUG "%s: I/O #1 passed!\n", dev->name)); - - for (i=0; i<32; i++) - if ( (data = rd_port(ioaddr)) != 0xff) break; - if (data==0xff) - goto out; - - PRINTK2((KERN_DEBUG "%s: I/O #2 passed!\n", dev->name)); - - if ((data != SA_ADDR0) || (rd_port(ioaddr) != SA_ADDR1) || - (rd_port(ioaddr) != SA_ADDR2)) - goto out; - - for (i=0; i<4; i++) - rd_port(ioaddr); - - if ( (rd_port(ioaddr) != NI5010_MAGICVAL1) || - (rd_port(ioaddr) != NI5010_MAGICVAL2) ) - goto out; - - PRINTK2((KERN_DEBUG "%s: I/O #3 passed!\n", dev->name)); - - if (NI5010_DEBUG && version_printed++ == 0) - printk(KERN_INFO "%s", version); - - printk("NI5010 ethercard probe at 0x%x: ", ioaddr); - - dev->base_addr = ioaddr; - - for (i=0; i<6; i++) { - outw(i, IE_GP); - dev->dev_addr[i] = inb(IE_SAPROM); - } - printk("%pM ", dev->dev_addr); - - PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name)); - -#ifdef JUMPERED_INTERRUPTS - if (dev->irq == 0xff) - ; - else if (dev->irq < 2) { - unsigned long irq_mask; - - PRINTK2((KERN_DEBUG "%s: I/O #5 passed!\n", dev->name)); - - irq_mask = probe_irq_on(); - trigger_irq(ioaddr); - mdelay(20); - dev->irq = probe_irq_off(irq_mask); - - PRINTK2((KERN_DEBUG "%s: I/O #6 passed!\n", dev->name)); - - if (dev->irq == 0) { - err = -EAGAIN; - printk(KERN_WARNING "%s: no IRQ found!\n", dev->name); - goto out; - } - PRINTK2((KERN_DEBUG "%s: I/O #7 passed!\n", dev->name)); - } else if (dev->irq == 2) { - dev->irq = 9; - } -#endif /* JUMPERED_INTERRUPTS */ - PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name)); - - /* DMA is not supported (yet?), so no use detecting it */ - lp = netdev_priv(dev); - - spin_lock_init(&lp->lock); - - PRINTK2((KERN_DEBUG "%s: I/O #10 passed!\n", dev->name)); - -/* get the size of the onboard receive buffer - * higher addresses than bufsize are wrapped into real buffer - * i.e. data for offs. 
0x801 is written to 0x1 with a 2K onboard buffer - */ - if (!bufsize_rcv) { - outb(1, IE_MMODE); /* Put Rcv buffer on system bus */ - outw(0, IE_GP); /* Point GP at start of packet */ - outb(0, IE_RBUF); /* set buffer byte 0 to 0 */ - for (i = 1; i < 0xff; i++) { - outw(i << 8, IE_GP); /* Point GP at packet size to be tested */ - outb(i, IE_RBUF); - outw(0x0, IE_GP); /* Point GP at start of packet */ - data = inb(IE_RBUF); - if (data == i) break; - } - bufsize_rcv = i << 8; - outw(0, IE_GP); /* Point GP at start of packet */ - outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */ - } - printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE); - - dev->netdev_ops = &ni5010_netdev_ops; - dev->watchdog_timeo = HZ/20; - - dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ - - /* Shut up the ni5010 */ - outb(0, EDLC_RMASK); /* Mask all receive interrupts */ - outb(0, EDLC_XMASK); /* Mask all xmit interrupts */ - outb(0xff, EDLC_RCLR); /* Kill all pending rcv interrupts */ - outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */ - - printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq); - if (dev->dma) - printk(" & DMA %d", dev->dma); - printk(".\n"); - return 0; -out: - release_region(dev->base_addr, NI5010_IO_EXTENT); - return err; -} - -/* - * Open/initialize the board. This is called (in the current kernel) - * sometime after booting when the 'ifconfig' program is run. - * - * This routine should set everything up anew at each open, even - * registers that "should" only need to be set once at boot, so that - * there is a non-reboot way to recover if something goes wrong. - */ - -static int ni5010_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int i; - - PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name)); - - if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) { - printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq); - return -EAGAIN; - } - PRINTK3((KERN_DEBUG "%s: passed open() #1\n", dev->name)); - /* - * Always allocate the DMA channel after the IRQ, - * and clean up on failure. - */ -#ifdef JUMPERED_DMA - if (request_dma(dev->dma, cardname)) { - printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma); - free_irq(dev->irq, NULL); - return -EAGAIN; - } -#endif /* JUMPERED_DMA */ - - PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name)); - /* Reset the hardware here. Don't forget to set the station address. 
*/ - - outb(RS_RESET, EDLC_RESET); /* Hold up EDLC_RESET while configing board */ - outb(0, IE_RESET); /* Hardware reset of ni5010 board */ - outb(XMD_LBC, EDLC_XMODE); /* Only loopback xmits */ - - PRINTK3((KERN_DEBUG "%s: passed open() #3\n", dev->name)); - /* Set the station address */ - for(i = 0;i < 6; i++) { - outb(dev->dev_addr[i], EDLC_ADDR + i); - } - - PRINTK3((KERN_DEBUG "%s: Initialising ni5010\n", dev->name)); - outb(0, EDLC_XMASK); /* No xmit interrupts for now */ - outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE); - /* Normal packet xmit mode */ - outb(0xff, EDLC_XCLR); /* Clear all pending xmit interrupts */ - outb(RMD_BROADCAST, EDLC_RMODE); - /* Receive broadcast and normal packets */ - reset_receiver(dev); /* Ready ni5010 for receiving packets */ - - outb(0, EDLC_RESET); /* Un-reset the ni5010 */ - - netif_start_queue(dev); - - if (NI5010_DEBUG) ni5010_show_registers(dev); - - PRINTK((KERN_DEBUG "%s: open successful\n", dev->name)); - return 0; -} - -static void reset_receiver(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - PRINTK3((KERN_DEBUG "%s: resetting receiver\n", dev->name)); - outw(0, IE_GP); /* Receive packet at start of buffer */ - outb(0xff, EDLC_RCLR); /* Clear all pending rcv interrupts */ - outb(0, IE_MMODE); /* Put EDLC to rcv buffer */ - outb(MM_EN_RCV, IE_MMODE); /* Enable rcv */ - outb(0xff, EDLC_RMASK); /* Enable all rcv interrupts */ -} - -static void ni5010_timeout(struct net_device *dev) -{ - printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, - tx_done(dev) ? "IRQ conflict" : "network cable problem"); - /* Try to restart the adaptor. */ - /* FIXME: Give it a real kick here */ - chipset_init(dev, 1); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - - PRINTK2((KERN_DEBUG "%s: entering ni5010_send_packet\n", dev->name)); - - /* - * Block sending - */ - - netif_stop_queue(dev); - hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len); - dev_kfree_skb (skb); - return NETDEV_TX_OK; -} - -/* - * The typical workload of the driver: - * Handle the network interface interrupts. - */ -static irqreturn_t ni5010_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct ni5010_local *lp; - int ioaddr, status; - int xmit_was_error = 0; - - PRINTK2((KERN_DEBUG "%s: entering ni5010_interrupt\n", dev->name)); - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - spin_lock(&lp->lock); - status = inb(IE_ISTAT); - PRINTK3((KERN_DEBUG "%s: IE_ISTAT = %#02x\n", dev->name, status)); - - if ((status & IS_R_INT) == 0) ni5010_rx(dev); - - if ((status & IS_X_INT) == 0) { - xmit_was_error = process_xmt_interrupt(dev); - } - - if ((status & IS_DMA_INT) == 0) { - PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name)); - outb(0, IE_DMA_RST); /* Reset DMA int */ - } - - if (!xmit_was_error) - reset_receiver(dev); - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - - -static void dump_packet(void *buf, int len) -{ - int i; - - printk(KERN_DEBUG "Packet length = %#4x\n", len); - for (i = 0; i < len; i++){ - if (i % 16 == 0) printk(KERN_DEBUG "%#4.4x", i); - if (i % 2 == 0) printk(" "); - printk("%2.2x", ((unsigned char *)buf)[i]); - if (i % 16 == 15) printk("\n"); - } - printk("\n"); -} - -/* We have a good packet, get it out of the buffer. 
*/ -static void ni5010_rx(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - unsigned char rcv_stat; - struct sk_buff *skb; - int i_pkt_size; - - PRINTK2((KERN_DEBUG "%s: entering ni5010_rx()\n", dev->name)); - - rcv_stat = inb(EDLC_RSTAT); - PRINTK3((KERN_DEBUG "%s: EDLC_RSTAT = %#2x\n", dev->name, rcv_stat)); - - if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) { - PRINTK((KERN_INFO "%s: receive error.\n", dev->name)); - dev->stats.rx_errors++; - if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++; - if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++; - if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++; - if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++; - outb(0xff, EDLC_RCLR); /* Clear the interrupt */ - return; - } - - outb(0xff, EDLC_RCLR); /* Clear the interrupt */ - - i_pkt_size = inw(IE_RCNT); - if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) { - PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n", - dev->name, i_pkt_size)); - dev->stats.rx_errors++; - dev->stats.rx_length_errors++; - return; - } - - /* Malloc up new buffer. */ - skb = netdev_alloc_skb(dev, i_pkt_size + 3); - if (skb == NULL) { - printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - return; - } - - skb_reserve(skb, 2); - - /* Read packet into buffer */ - outb(MM_MUX, IE_MMODE); /* Rcv buffer to system bus */ - outw(0, IE_GP); /* Seek to beginning of packet */ - insb(IE_RBUF, skb_put(skb, i_pkt_size), i_pkt_size); - - if (NI5010_DEBUG >= 4) - dump_packet(skb->data, skb->len); - - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += i_pkt_size; - - PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n", - dev->name, i_pkt_size)); -} - -static int process_xmt_interrupt(struct net_device *dev) -{ - struct ni5010_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int xmit_stat; - - PRINTK2((KERN_DEBUG "%s: entering process_xmt_interrupt\n", dev->name)); - - xmit_stat = inb(EDLC_XSTAT); - PRINTK3((KERN_DEBUG "%s: EDLC_XSTAT = %2.2x\n", dev->name, xmit_stat)); - - outb(0, EDLC_XMASK); /* Disable xmit IRQ's */ - outb(0xff, EDLC_XCLR); /* Clear all pending xmit IRQ's */ - - if (xmit_stat & XS_COLL){ - PRINTK((KERN_DEBUG "%s: collision detected, retransmitting\n", - dev->name)); - outw(NI5010_BUFSIZE - lp->o_pkt_size, IE_GP); - /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */ - outb(MM_EN_XMT | MM_MUX, IE_MMODE); - outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */ - dev->stats.collisions++; - return 1; - } - - /* FIXME: handle other xmt error conditions */ - - dev->stats.tx_packets++; - dev->stats.tx_bytes += lp->o_pkt_size; - netif_wake_queue(dev); - - PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n", - dev->name, lp->o_pkt_size)); - - return 0; -} - -/* The inverse routine to ni5010_open(). */ -static int ni5010_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name)); -#ifdef JUMPERED_INTERRUPTS - free_irq(dev->irq, NULL); -#endif - /* Put card in held-RESET state */ - outb(0, IE_MMODE); - outb(RS_RESET, EDLC_RESET); - - netif_stop_queue(dev); - - PRINTK((KERN_DEBUG "%s: %s closed down\n", dev->name, boardname)); - return 0; - -} - -/* Set or clear the multicast filter for this adaptor. 
- num_addrs == -1 Promiscuous mode, receive all packets - num_addrs == 0 Normal mode, clear multicast list - num_addrs > 0 Multicast mode, receive normal and MC packets, and do - best-effort filtering. -*/ -static void ni5010_set_multicast_list(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - - PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); - - if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI || - !netdev_mc_empty(dev)) { - outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ - PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); - } else { - PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name)); - outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */ - } -} - -static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad) -{ - struct ni5010_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - unsigned int buf_offs; - - PRINTK2((KERN_DEBUG "%s: entering hardware_send_packet\n", dev->name)); - - if (length > ETH_FRAME_LEN) { - PRINTK((KERN_WARNING "%s: packet too large, not possible\n", - dev->name)); - return; - } - - if (NI5010_DEBUG) ni5010_show_registers(dev); - - if (inb(IE_ISTAT) & IS_EN_XMT) { - PRINTK((KERN_WARNING "%s: sending packet while already transmitting, not possible\n", - dev->name)); - return; - } - - if (NI5010_DEBUG > 3) dump_packet(buf, length); - - buf_offs = NI5010_BUFSIZE - length - pad; - - spin_lock_irqsave(&lp->lock, flags); - lp->o_pkt_size = length + pad; - - outb(0, EDLC_RMASK); /* Mask all receive interrupts */ - outb(0, IE_MMODE); /* Put Xmit buffer on system bus */ - outb(0xff, EDLC_RCLR); /* Clear out pending rcv interrupts */ - - outw(buf_offs, IE_GP); /* Point GP at start of packet */ - outsb(IE_XBUF, buf, length); /* Put data in buffer */ - while(pad--) - outb(0, IE_XBUF); - - outw(buf_offs, IE_GP); /* Rewrite where packet starts */ - - /* should work without that outb() (Crynwr used it) */ - /*outb(MM_MUX, IE_MMODE);*/ /* Xmt buffer to EDLC bus */ - outb(MM_EN_XMT | MM_MUX, IE_MMODE); /* Begin transmission */ - outb(XM_ALL, EDLC_XMASK); /* Cause interrupt after completion or fail */ - - spin_unlock_irqrestore(&lp->lock, flags); - - netif_wake_queue(dev); - - if (NI5010_DEBUG) ni5010_show_registers(dev); -} - -static void chipset_init(struct net_device *dev, int startp) -{ - /* FIXME: Move some stuff here */ - PRINTK3((KERN_DEBUG "%s: doing NOTHING in chipset_init\n", dev->name)); -} - -static void ni5010_show_registers(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - PRINTK3((KERN_DEBUG "%s: XSTAT %#2.2x\n", dev->name, inb(EDLC_XSTAT))); - PRINTK3((KERN_DEBUG "%s: XMASK %#2.2x\n", dev->name, inb(EDLC_XMASK))); - PRINTK3((KERN_DEBUG "%s: RSTAT %#2.2x\n", dev->name, inb(EDLC_RSTAT))); - PRINTK3((KERN_DEBUG "%s: RMASK %#2.2x\n", dev->name, inb(EDLC_RMASK))); - PRINTK3((KERN_DEBUG "%s: RMODE %#2.2x\n", dev->name, inb(EDLC_RMODE))); - PRINTK3((KERN_DEBUG "%s: XMODE %#2.2x\n", dev->name, inb(EDLC_XMODE))); - PRINTK3((KERN_DEBUG "%s: ISTAT %#2.2x\n", dev->name, inb(IE_ISTAT))); -} - -#ifdef MODULE -static struct net_device *dev_ni5010; - -module_param(io, int, 0); -module_param(irq, int, 0); -MODULE_PARM_DESC(io, "ni5010 I/O base address"); -MODULE_PARM_DESC(irq, "ni5010 IRQ number"); - -static int __init ni5010_init_module(void) -{ - PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname)); - /* - if(io <= 0 || irq == 0){ - printk(KERN_WARNING "%s: Autoprobing not allowed for 
modules.\n", boardname); - printk(KERN_WARNING "%s: Set symbols 'io' and 'irq'\n", boardname); - return -EINVAL; - } - */ - if (io <= 0){ - printk(KERN_WARNING "%s: Autoprobing for modules is hazardous, trying anyway..\n", boardname); - } - - PRINTK2((KERN_DEBUG "%s: init_module irq=%#2x, io=%#3x\n", boardname, irq, io)); - dev_ni5010 = ni5010_probe(-1); - if (IS_ERR(dev_ni5010)) - return PTR_ERR(dev_ni5010); - return 0; -} - -static void __exit ni5010_cleanup_module(void) -{ - PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname)); - unregister_netdev(dev_ni5010); - release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT); - free_netdev(dev_ni5010); -} -module_init(ni5010_init_module); -module_exit(ni5010_cleanup_module); -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/racal/ni5010.h b/drivers/net/ethernet/racal/ni5010.h deleted file mode 100644 index e10e717fcd76..000000000000 --- a/drivers/net/ethernet/racal/ni5010.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Racal-Interlan ni5010 Ethernet definitions - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers that work. - * - * copyrights (c) 1996 by Jan-Pascal van Best (jvbest@wi.leidenuniv.nl) - * - * I have done a look in the following sources: - * crynwr-packet-driver by Russ Nelson - */ - -#define NI5010_BUFSIZE 2048 /* number of bytes in a buffer */ - -#define NI5010_MAGICVAL0 0x00 /* magic-values for ni5010 card */ -#define NI5010_MAGICVAL1 0x55 -#define NI5010_MAGICVAL2 0xAA - -#define SA_ADDR0 0x02 -#define SA_ADDR1 0x07 -#define SA_ADDR2 0x01 - -/* The number of low I/O ports used by the ni5010 ethercard. */ -#define NI5010_IO_EXTENT 32 - -#define PRINTK(x) if (NI5010_DEBUG) printk x -#define PRINTK2(x) if (NI5010_DEBUG>=2) printk x -#define PRINTK3(x) if (NI5010_DEBUG>=3) printk x - -/* The various IE command registers */ -#define EDLC_XSTAT (ioaddr + 0x00) /* EDLC transmit csr */ -#define EDLC_XCLR (ioaddr + 0x00) /* EDLC transmit "Clear IRQ" */ -#define EDLC_XMASK (ioaddr + 0x01) /* EDLC transmit "IRQ Masks" */ -#define EDLC_RSTAT (ioaddr + 0x02) /* EDLC receive csr */ -#define EDLC_RCLR (ioaddr + 0x02) /* EDLC receive "Clear IRQ" */ -#define EDLC_RMASK (ioaddr + 0x03) /* EDLC receive "IRQ Masks" */ -#define EDLC_XMODE (ioaddr + 0x04) /* EDLC transmit Mode */ -#define EDLC_RMODE (ioaddr + 0x05) /* EDLC receive Mode */ -#define EDLC_RESET (ioaddr + 0x06) /* EDLC RESET register */ -#define EDLC_TDR1 (ioaddr + 0x07) /* "Time Domain Reflectometry" reg1 */ -#define EDLC_ADDR (ioaddr + 0x08) /* EDLC station address, 6 bytes */ - /* 0x0E doesn't exist for r/w */ -#define EDLC_TDR2 (ioaddr + 0x0f) /* "Time Domain Reflectometry" reg2 */ -#define IE_GP (ioaddr + 0x10) /* GP pointer (word register) */ - /* 0x11 is 2nd byte of GP Pointer */ -#define IE_RCNT (ioaddr + 0x10) /* Count of bytes in rcv'd packet */ - /* 0x11 is 2nd byte of "Byte Count" */ -#define IE_MMODE (ioaddr + 0x12) /* Memory Mode register */ -#define IE_DMA_RST (ioaddr + 0x13) /* IE DMA Reset. write only */ -#define IE_ISTAT (ioaddr + 0x13) /* IE Interrupt Status. 
read only */ -#define IE_RBUF (ioaddr + 0x14) /* IE Receive Buffer port */ -#define IE_XBUF (ioaddr + 0x15) /* IE Transmit Buffer port */ -#define IE_SAPROM (ioaddr + 0x16) /* window on station addr prom */ -#define IE_RESET (ioaddr + 0x17) /* any write causes Board Reset */ - -/* bits in EDLC_XSTAT, interrupt clear on write, status when read */ -#define XS_TPOK 0x80 /* transmit packet successful */ -#define XS_CS 0x40 /* carrier sense */ -#define XS_RCVD 0x20 /* transmitted packet received */ -#define XS_SHORT 0x10 /* transmission media is shorted */ -#define XS_UFLW 0x08 /* underflow. iff failed board */ -#define XS_COLL 0x04 /* collision occurred */ -#define XS_16COLL 0x02 /* 16th collision occurred */ -#define XS_PERR 0x01 /* parity error */ - -#define XS_CLR_UFLW 0x08 /* clear underflow */ -#define XS_CLR_COLL 0x04 /* clear collision */ -#define XS_CLR_16COLL 0x02 /* clear 16th collision */ -#define XS_CLR_PERR 0x01 /* clear parity error */ - -/* bits in EDLC_XMASK, mask/enable transmit interrupts. register is r/w */ -#define XM_TPOK 0x80 /* =1 to enable Xmt Pkt OK interrupts */ -#define XM_RCVD 0x20 /* =1 to enable Xmt Pkt Rcvd ints */ -#define XM_UFLW 0x08 /* =1 to enable Xmt Underflow ints */ -#define XM_COLL 0x04 /* =1 to enable Xmt Collision ints */ -#define XM_COLL16 0x02 /* =1 to enable Xmt 16th Coll ints */ -#define XM_PERR 0x01 /* =1 to enable Xmt Parity Error ints */ - /* note: always clear this bit */ -#define XM_ALL (XM_TPOK | XM_RCVD | XM_UFLW | XM_COLL | XM_COLL16) - -/* bits in EDLC_RSTAT, interrupt clear on write, status when read */ -#define RS_PKT_OK 0x80 /* received good packet */ -#define RS_RST_PKT 0x10 /* RESET packet received */ -#define RS_RUNT 0x08 /* Runt Pkt rcvd. Len < 64 Bytes */ -#define RS_ALIGN 0x04 /* Alignment error. not 8 bit aligned */ -#define RS_CRC_ERR 0x02 /* Bad CRC on rcvd pkt */ -#define RS_OFLW 0x01 /* overflow for rcv FIFO */ -#define RS_VALID_BITS ( RS_PKT_OK | RS_RST_PKT | RS_RUNT | RS_ALIGN | RS_CRC_ERR | RS_OFLW ) - /* all valid RSTAT bits */ - -#define RS_CLR_PKT_OK 0x80 /* clear rcvd packet interrupt */ -#define RS_CLR_RST_PKT 0x10 /* clear RESET packet received */ -#define RS_CLR_RUNT 0x08 /* clear Runt Pckt received */ -#define RS_CLR_ALIGN 0x04 /* clear Alignment error */ -#define RS_CLR_CRC_ERR 0x02 /* clear CRC error */ -#define RS_CLR_OFLW 0x01 /* clear rcv FIFO Overflow */ - -/* bits in EDLC_RMASK, mask/enable receive interrupts. register is r/w */ -#define RM_PKT_OK 0x80 /* =1 to enable rcvd good packet ints */ -#define RM_RST_PKT 0x10 /* =1 to enable RESET packet ints */ -#define RM_RUNT 0x08 /* =1 to enable Runt Pkt rcvd ints */ -#define RM_ALIGN 0x04 /* =1 to enable Alignment error ints */ -#define RM_CRC_ERR 0x02 /* =1 to enable Bad CRC error ints */ -#define RM_OFLW 0x01 /* =1 to enable overflow error ints */ - -/* bits in EDLC_RMODE, set Receive Packet mode. register is r/w */ -#define RMD_TEST 0x80 /* =1 for Chip testing. normally 0 */ -#define RMD_ADD_SIZ 0x10 /* =1 5-byte addr match. normally 0 */ -#define RMD_EN_RUNT 0x08 /* =1 enable runt rcv. normally 0 */ -#define RMD_EN_RST 0x04 /* =1 to rcv RESET pkt. normally 0 */ - -#define RMD_PROMISC 0x03 /* receive *all* packets. unusual */ -#define RMD_MULTICAST 0x02 /* receive multicasts too. unusual */ -#define RMD_BROADCAST 0x01 /* receive broadcasts & normal. usual */ -#define RMD_NO_PACKETS 0x00 /* don't receive any packets. unusual */ - -/* bits in EDLC_XMODE, set Transmit Packet mode. register is r/w */ -#define XMD_COLL_CNT 0xf0 /* coll's since success. 
read-only */ -#define XMD_IG_PAR 0x08 /* =1 to ignore parity. ALWAYS set */ -#define XMD_T_MODE 0x04 /* =1 to power xcvr. ALWAYS set this */ -#define XMD_LBC 0x02 /* =1 for loopbakc. normally set */ -#define XMD_DIS_C 0x01 /* =1 disables contention. normally 0 */ - -/* bits in EDLC_RESET, write only */ -#define RS_RESET 0x80 /* =1 to hold EDLC in reset state */ - -/* bits in IE_MMODE, write only */ -#define MM_EN_DMA 0x80 /* =1 begin DMA xfer, Cplt clrs it */ -#define MM_EN_RCV 0x40 /* =1 allows Pkt rcv. clr'd by rcv */ -#define MM_EN_XMT 0x20 /* =1 begin Xmt pkt. Cplt clrs it */ -#define MM_BUS_PAGE 0x18 /* =00 ALWAYS. Used when MUX=1 */ -#define MM_NET_PAGE 0x06 /* =00 ALWAYS. Used when MUX=0 */ -#define MM_MUX 0x01 /* =1 means Rcv Buff on system bus */ - /* =0 means Xmt Buff on system bus */ - -/* bits in IE_ISTAT, read only */ -#define IS_TDIAG 0x80 /* =1 if Diagnostic problem */ -#define IS_EN_RCV 0x20 /* =1 until frame is rcv'd cplt */ -#define IS_EN_XMT 0x10 /* =1 until frame is xmt'd cplt */ -#define IS_EN_DMA 0x08 /* =1 until DMA is cplt or aborted */ -#define IS_DMA_INT 0x04 /* =0 iff DMA done interrupt. */ -#define IS_R_INT 0x02 /* =0 iff unmasked Rcv interrupt */ -#define IS_X_INT 0x01 /* =0 iff unmasked Xmt interrupt */ - diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 63c13125db6c..5b4103db70f5 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -755,9 +755,6 @@ static void r6040_mac_address(struct net_device *dev) iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); - - /* Store MAC Address in perm_addr */ - memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); } static int r6040_open(struct net_device *dev) @@ -957,9 +954,9 @@ static void netdev_get_drvinfo(struct net_device *dev, { struct r6040_private *rp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(rp->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -1045,7 +1042,7 @@ static int r6040_mii_probe(struct net_device *dev) } phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link, - 0, PHY_INTERFACE_MODE_MII); + PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { dev_err(&lp->pdev->dev, "could not attach to PHY\n"); @@ -1195,9 +1192,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) lp->mii_bus->name = "r6040_eth_mii"; snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", dev_name(&pdev->dev), card_idx); - lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); if (!lp->mii_bus->irq) { - dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); err = -ENOMEM; goto err_out_mdio; } diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 5ac93323a40c..b62a32484f6a 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1949,7 +1949,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < 3; i++) ((__le16 *) (dev->dev_addr))[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 
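/*
 * A minimal illustrative sketch (not part of this patch): the r6040 and
 * 8139cp hunks above show janitorial conversions that recur throughout the
 * series -- explicit dev->perm_addr copies are dropped, strcpy()/strncpy()
 * into the fixed-size ethtool_drvinfo fields becomes strlcpy() so the copy
 * is bounded and NUL-terminated, open-coded kmalloc(n * size) becomes
 * kmalloc_array() so the size multiplication is overflow-checked, and
 * phy_connect() is called without its old flags argument.  The fragment
 * below only demonstrates the strlcpy() and kmalloc_array() idioms; the
 * foo_* names are hypothetical and do not appear anywhere in the patch.
 */
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>

#define FOO_DRV_NAME	"foo_eth"
#define FOO_DRV_VERSION	"0.1"

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	/* Bounded copies that cannot overrun the fixed-size drvinfo fields. */
	strlcpy(info->driver, FOO_DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, FOO_DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(dev->dev.parent),
		sizeof(info->bus_info));
}

static int foo_alloc_rx_ring(struct sk_buff ***ring, unsigned int entries)
{
	/* kmalloc_array() returns NULL if entries * size would overflow. */
	*ring = kmalloc_array(entries, sizeof(**ring), GFP_KERNEL);
	return *ring ? 0 : -ENOMEM;
}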
dev->netdev_ops = &cp_netdev_ops; netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 5dc161630127..1276ac71353a 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -991,7 +991,6 @@ static int rtl8139_init_one(struct pci_dev *pdev, for (i = 0; i < 3; i++) ((__le16 *) (dev->dev_addr))[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* The Rtl8139-specific entries in the device structure. */ dev->netdev_ops = &rtl8139_netdev_ops; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 998974f78742..8900398ba103 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -83,7 +83,7 @@ static const int multicast_filter_limit = 32; #define R8169_REGS_SIZE 256 #define R8169_NAPI_WEIGHT 64 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ -#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) @@ -723,7 +723,6 @@ struct rtl8169_private { u16 mac_version; u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ - u32 dirty_rx; u32 dirty_tx; struct rtl8169_stats rx_stats; struct rtl8169_stats tx_stats; @@ -4136,7 +4135,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) { - tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; } static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5870,7 +5869,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); /* The infamous DAC f*ckup only happens at boot time */ - if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { + if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { void __iomem *ioaddr = tp->mmio_addr; netif_info(tp, intr, dev, "disabling PCI DAC\n"); @@ -5985,10 +5984,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget unsigned int count; cur_rx = tp->cur_rx; - rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; - rx_left = min(rx_left, budget); - for (; rx_left > 0; rx_left--, cur_rx++) { + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; struct RxDesc *desc = tp->RxDescArray + entry; u32 status; @@ -6066,8 +6063,6 @@ release_descriptor: count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; - tp->dirty_rx += count; - return count; } @@ -6891,7 +6886,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Get MAC address */ for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); dev->watchdog_timeo = RTL8169_TX_TIMEOUT; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 3d705862bd7d..33e96176e4d8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -891,18 +891,16 @@ static int sh_eth_ring_init(struct net_device *ndev) mdp->rx_buf_sz += NET_IP_ALIGN; /* 
Allocate RX and TX skb rings */ - mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring, - GFP_KERNEL); + mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring, + sizeof(*mdp->rx_skbuff), GFP_KERNEL); if (!mdp->rx_skbuff) { - dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); ret = -ENOMEM; return ret; } - mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring, - GFP_KERNEL); + mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring, + sizeof(*mdp->tx_skbuff), GFP_KERNEL); if (!mdp->tx_skbuff) { - dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); ret = -ENOMEM; goto skb_ring_free; } @@ -1422,7 +1420,7 @@ static int sh_eth_phy_init(struct net_device *ndev) /* Try connect to PHY */ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, - 0, mdp->phy_interface); + mdp->phy_interface); if (IS_ERR(phydev)) { dev_err(&ndev->dev, "phy_connect failed\n"); return PTR_ERR(phydev); diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c index 72fc57dd084d..21683e2b1ff4 100644 --- a/drivers/net/ethernet/s6gmac.c +++ b/drivers/net/ethernet/s6gmac.c @@ -795,7 +795,7 @@ static inline int s6gmac_phy_start(struct net_device *dev) struct phy_device *p = NULL; while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) i++; - p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, + p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, PHY_INTERFACE_MODE_RGMII); if (IS_ERR(p)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig index 29f18533fdc7..a71e1ec068ed 100644 --- a/drivers/net/ethernet/seeq/Kconfig +++ b/drivers/net/ethernet/seeq/Kconfig @@ -26,17 +26,6 @@ config ARM_ETHER3 If you have an Acorn system with one of these network cards, you should say Y to this option if you wish to use it with Linux. -config SEEQ8005 - tristate "SEEQ8005 support (EXPERIMENTAL)" - depends on EXPERIMENTAL - ---help--- - This is a driver for the SEEQ 8005 network (Ethernet) card. If this - is for you, read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called seeq8005. - config SGISEEQ tristate "SGI Seeq ethernet controller support" depends on SGI_HAS_SEEQ diff --git a/drivers/net/ethernet/seeq/Makefile b/drivers/net/ethernet/seeq/Makefile index 3e258a580c05..0488e99b831f 100644 --- a/drivers/net/ethernet/seeq/Makefile +++ b/drivers/net/ethernet/seeq/Makefile @@ -3,5 +3,4 @@ # obj-$(CONFIG_ARM_ETHER3) += ether3.o -obj-$(CONFIG_SEEQ8005) += seeq8005.o obj-$(CONFIG_SGISEEQ) += sgiseeq.o diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c deleted file mode 100644 index d6e50de71186..000000000000 --- a/drivers/net/ethernet/seeq/seeq8005.c +++ /dev/null @@ -1,749 +0,0 @@ -/* seeq8005.c: A network driver for linux. */ -/* - Based on skeleton.c, - Written 1993-94 by Donald Becker. - See the skeleton.c file for further copyright information. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as hamish@zot.apana.org.au - - This file is a network device driver for the SEEQ 8005 chipset and - the Linux operating system. - -*/ - -static const char version[] = - "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n"; - -/* - Sources: - SEEQ 8005 databook - - Version history: - 1.00 Public release. 
cosmetic changes (no warnings now) - 0.68 Turning per- packet,interrupt debug messages off - testing for release. - 0.67 timing problems/bad buffer reads seem to be fixed now - 0.63 *!@$ protocol=eth_type_trans -- now packets flow - 0.56 Send working - 0.48 Receive working -*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> - -#include <asm/io.h> -#include <asm/dma.h> - -#include "seeq8005.h" - -/* First, a few definitions that the brave might change. */ -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int seeq8005_portlist[] __initdata = - { 0x300, 0x320, 0x340, 0x360, 0}; - -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 1 -#endif -static unsigned int net_debug = NET_DEBUG; - -/* Information that need to be kept for each board. */ -struct net_local { - unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */ - long open_time; /* Useless example local info. */ -}; - -/* The station (ethernet) address prefix, used for IDing the board. */ -#define SA_ADDR0 0x00 -#define SA_ADDR1 0x80 -#define SA_ADDR2 0x4b - -/* Index to functions, as function prototypes. */ - -static int seeq8005_probe1(struct net_device *dev, int ioaddr); -static int seeq8005_open(struct net_device *dev); -static void seeq8005_timeout(struct net_device *dev); -static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t seeq8005_interrupt(int irq, void *dev_id); -static void seeq8005_rx(struct net_device *dev); -static int seeq8005_close(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); - -/* Example routines you must write ;->. */ -#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON) -static void hardware_send_packet(struct net_device *dev, char *buf, int length); -extern void seeq8005_init(struct net_device *dev, int startp); -static inline void wait_for_buffer(struct net_device *dev); - - -/* Check for a network adaptor of this type, and return '0' iff one exists. - If dev->base_addr == 0, probe all likely locations. - If dev->base_addr == 1, always return failure. - */ - -static int io = 0x320; -static int irq = 10; - -struct net_device * __init seeq8005_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - unsigned *port; - int err = 0; - - if (!dev) - return ERR_PTR(-ENODEV); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - } - - if (io > 0x1ff) { /* Check a single specified location. */ - err = seeq8005_probe1(dev, io); - } else if (io != 0) { /* Don't probe at all. 
*/ - err = -ENXIO; - } else { - for (port = seeq8005_portlist; *port; port++) { - if (seeq8005_probe1(dev, *port) == 0) - break; - } - if (!*port) - err = -ENODEV; - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - release_region(dev->base_addr, SEEQ8005_IO_EXTENT); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops seeq8005_netdev_ops = { - .ndo_open = seeq8005_open, - .ndo_stop = seeq8005_close, - .ndo_start_xmit = seeq8005_send_packet, - .ndo_tx_timeout = seeq8005_timeout, - .ndo_set_rx_mode = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* This is the real probe routine. Linux has a history of friendly device - probes on the ISA bus. A good device probes avoids doing writes, and - verifies that the correct device exists and functions. */ - -static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) -{ - static unsigned version_printed; - int i,j; - unsigned char SA_prom[32]; - int old_cfg1; - int old_cfg2; - int old_stat; - int old_dmaar; - int old_rear; - int retval; - - if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005")) - return -ENODEV; - - if (net_debug>1) - printk("seeq8005: probing at 0x%x\n",ioaddr); - - old_stat = inw(SEEQ_STATUS); /* read status register */ - if (old_stat == 0xffff) { - retval = -ENODEV; - goto out; /* assume that 0xffff == no device */ - } - if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */ - if (net_debug>1) { - printk("seeq8005: reserved stat bits != 0x1800\n"); - printk(" == 0x%04x\n",old_stat); - } - retval = -ENODEV; - goto out; - } - - old_rear = inw(SEEQ_REA); - if (old_rear == 0xffff) { - outw(0,SEEQ_REA); - if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */ - retval = -ENODEV; - goto out; - } - } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */ - if (net_debug>1) { - printk("seeq8005: unused rear bits != 0xff00\n"); - printk(" == 0x%04x\n",old_rear); - } - retval = -ENODEV; - goto out; - } - - old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */ - old_cfg1 = inw(SEEQ_CFG1); - old_dmaar = inw(SEEQ_DMAAR); - - if (net_debug>4) { - printk("seeq8005: stat = 0x%04x\n",old_stat); - printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1); - printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2); - printk("seeq8005: raer = 0x%04x\n",old_rear); - printk("seeq8005: dmaar= 0x%04x\n",old_dmaar); - } - - outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */ - outw( 0, SEEQ_DMAAR); /* set starting PROM address */ - outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */ - - - j=0; - for(i=0; i <32; i++) { - j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff; - } - -#if 0 - /* untested because I only have the one card */ - if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */ - if (net_debug>1) { /* check this before deciding that we have a card */ - printk("seeq8005: prom sum error\n"); - } - outw( old_stat, SEEQ_STATUS); - outw( old_dmaar, SEEQ_DMAAR); - outw( old_cfg1, SEEQ_CFG1); - retval = -ENODEV; - goto out; - } -#endif - - outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */ - udelay(5); - outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD); - - if (net_debug) { - printk("seeq8005: prom sum = 0x%08x\n",j); - for(j=0; j<32; j+=16) { - printk("seeq8005: prom %02x: ",j); - for(i=0;i<16;i++) { - printk("%02x ",SA_prom[j|i]); - } - printk(" "); - 
for(i=0;i<16;i++) { - if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) { - printk("%c", SA_prom[j|i]); - } else { - printk(" "); - } - } - printk("\n"); - } - } - -#if 0 - /* - * testing the packet buffer memory doesn't work yet - * but all other buffer accesses do - * - fixing is not a priority - */ - if (net_debug>1) { /* test packet buffer memory */ - printk("seeq8005: testing packet buffer ... "); - outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1); - outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); - outw( 0 , SEEQ_DMAAR); - for(i=0;i<32768;i++) { - outw(0x5a5a, SEEQ_BUFFER); - } - j=jiffies+HZ; - while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && time_before(jiffies, j) ) - mb(); - outw( 0 , SEEQ_DMAAR); - while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, j+HZ)) - mb(); - if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT) - outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD); - outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); - j=0; - for(i=0;i<32768;i++) { - if (inw(SEEQ_BUFFER) != 0x5a5a) - j++; - } - if (j) { - printk("%i\n",j); - } else { - printk("ok.\n"); - } - } -#endif - - if (net_debug && version_printed++ == 0) - printk(version); - - printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr); - - /* Fill in the 'dev' fields. */ - dev->base_addr = ioaddr; - dev->irq = irq; - - /* Retrieve and print the ethernet address. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = SA_prom[i+6]; - printk("%pM", dev->dev_addr); - - if (dev->irq == 0xff) - ; /* Do nothing: a user-level program will set it. */ - else if (dev->irq < 2) { /* "Auto-IRQ" */ - unsigned long cookie = probe_irq_on(); - - outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD ); - - dev->irq = probe_irq_off(cookie); - - if (net_debug >= 2) - printk(" autoirq is %d\n", dev->irq); - } else if (dev->irq == 2) - /* Fixup for users that don't know that IRQ 2 is really IRQ 9, - * or don't know which one to set. - */ - dev->irq = 9; - -#if 0 - { - int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev); - if (irqval) { - printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name, - dev->irq, irqval); - retval = -EAGAIN; - goto out; - } - } -#endif - dev->netdev_ops = &seeq8005_netdev_ops; - dev->watchdog_timeo = HZ/20; - dev->flags &= ~IFF_MULTICAST; - - return 0; -out: - release_region(ioaddr, SEEQ8005_IO_EXTENT); - return retval; -} - - -/* Open/initialize the board. This is called (in the current kernel) - sometime after booting when the 'ifconfig' program is run. - - This routine should set everything up anew at each open, even - registers that "should" only need to be set once at boot, so that - there is non-reboot way to recover if something goes wrong. - */ -static int seeq8005_open(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - { - int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev); - if (irqval) { - printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name, - dev->irq, irqval); - return -EAGAIN; - } - } - - /* Reset the hardware here. Don't forget to set the station address. */ - seeq8005_init(dev, 1); - - lp->open_time = jiffies; - - netif_start_queue(dev); - return 0; -} - -static void seeq8005_timeout(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, - tx_done(dev) ? 
"IRQ conflict" : "network cable problem"); - /* Try to restart the adaptor. */ - seeq8005_init(dev, 1); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - short length = skb->len; - unsigned char *buf; - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - buf = skb->data; - - /* Block a timer-based transmit from overlapping */ - netif_stop_queue(dev); - - hardware_send_packet(dev, buf, length); - dev->stats.tx_bytes += length; - dev_kfree_skb (skb); - /* You might need to clean up and record Tx statistics here. */ - - return NETDEV_TX_OK; -} - -/* - * wait_for_buffer - * - * This routine waits for the SEEQ chip to assert that the FIFO is ready - * by checking for a window interrupt, and then clearing it. This has to - * occur in the interrupt handler! - */ -inline void wait_for_buffer(struct net_device * dev) -{ - int ioaddr = dev->base_addr; - unsigned long tmp; - int status; - - tmp = jiffies + HZ; - while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp)) - cpu_relax(); - - if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT) - outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); -} - -/* The typical workload of the driver: - Handle the network interface interrupts. */ -static irqreturn_t seeq8005_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr, status, boguscount = 0; - int handled = 0; - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - status = inw(SEEQ_STATUS); - do { - if (net_debug >2) { - printk("%s: int, status=0x%04x\n",dev->name,status); - } - - if (status & SEEQSTAT_WINDOW_INT) { - handled = 1; - outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - if (net_debug) { - printk("%s: window int!\n",dev->name); - } - } - if (status & SEEQSTAT_TX_INT) { - handled = 1; - outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - dev->stats.tx_packets++; - netif_wake_queue(dev); /* Inform upper layers. */ - } - if (status & SEEQSTAT_RX_INT) { - handled = 1; - /* Got a packet(s). */ - seeq8005_rx(dev); - } - status = inw(SEEQ_STATUS); - } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ; - - if(net_debug>2) { - printk("%s: eoi\n",dev->name); - } - return IRQ_RETVAL(handled); -} - -/* We have a good packet(s), get it/them out of the buffers. */ -static void seeq8005_rx(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int boguscount = 10; - int pkt_hdr; - int ioaddr = dev->base_addr; - - do { - int next_packet; - int pkt_len; - int i; - int status; - - status = inw(SEEQ_STATUS); - outw( lp->receive_ptr, SEEQ_DMAAR); - outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - wait_for_buffer(dev); - next_packet = ntohs(inw(SEEQ_BUFFER)); - pkt_hdr = inw(SEEQ_BUFFER); - - if (net_debug>2) { - printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr); - } - - if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? 
*/ - return; /* Done for now */ - } - - if ((pkt_hdr & SEEQPKTS_DONE)==0) - break; - - if (next_packet < lp->receive_ptr) { - pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4; - } else { - pkt_len = next_packet - lp->receive_ptr - 4; - } - - if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */ - printk("%s: recv packet ring corrupt, resetting board\n",dev->name); - seeq8005_init(dev,1); - return; - } - - lp->receive_ptr = next_packet; - - if (net_debug>2) { - printk("%s: recv len=0x%04x\n",dev->name,pkt_len); - } - - if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */ - dev->stats.rx_errors++; - if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++; - if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++; - if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++; - if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++; - /* skip over this packet */ - outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA); - } else { - /* Malloc up new buffer. */ - struct sk_buff *skb; - unsigned char *buf; - - skb = netdev_alloc_skb(dev, pkt_len); - if (skb == NULL) { - printk("%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - break; - } - skb_reserve(skb, 2); /* align data on 16 byte */ - buf = skb_put(skb,pkt_len); - - insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1); - - if (net_debug>2) { - char * p = buf; - printk("%s: recv ",dev->name); - for(i=0;i<14;i++) { - printk("%02x ",*(p++)&0xff); - } - printk("\n"); - } - - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN)); - - /* If any worth-while packets have been received, netif_rx() - has done a mark_bh(NET_BH) for us and will work on them - when we get to the bottom-half routine. */ -} - -/* The inverse routine to net_open(). */ -static int seeq8005_close(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - lp->open_time = 0; - - netif_stop_queue(dev); - - /* Flush the Tx and disable Rx here. */ - outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD); - - free_irq(dev->irq, dev); - - /* Update the statistics here. */ - - return 0; - -} - -/* Set or clear the multicast filter for this adaptor. - num_addrs == -1 Promiscuous mode, receive all packets - num_addrs == 0 Normal mode, clear multicast list - num_addrs > 0 Multicast mode, receive normal and MC packets, and do - best-effort filtering. - */ -static void set_multicast_list(struct net_device *dev) -{ -/* - * I _could_ do up to 6 addresses here, but won't (yet?) - */ - -#if 0 - int ioaddr = dev->base_addr; -/* - * hmm, not even sure if my matching works _anyway_ - seem to be receiving - * _everything_ . . . 
- */ - - if (num_addrs) { /* Enable promiscuous mode */ - outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1); - dev->flags|=IFF_PROMISC; - } else { /* Disable promiscuous mode, use normal mode */ - outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1); - } -#endif -} - -void seeq8005_init(struct net_device *dev, int startp) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int i; - - outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */ - udelay(5); - - outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); - outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */ -/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */ - outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1); - - for(i=0;i<6;i++) { /* set Station address */ - outb(dev->dev_addr[i], SEEQ_BUFFER); - udelay(2); - } - - outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */ - outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */ - - lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */ - outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */ - - outw( 0x00ff, SEEQ_REA); /* Receive Area End */ - - if (net_debug>4) { - printk("%s: SA0 = ",dev->name); - - outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); - outw( 0, SEEQ_DMAAR); - outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1); - - for(i=0;i<6;i++) { - printk("%02x ",inb(SEEQ_BUFFER)); - } - printk("\n"); - } - - outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1); - outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2); - outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD); - - if (net_debug>4) { - int old_cfg1; - old_cfg1 = inw(SEEQ_CFG1); - printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS)); - printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1); - printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2)); - printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA)); - printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR)); - - } -} - - -static void hardware_send_packet(struct net_device * dev, char *buf, int length) -{ - int ioaddr = dev->base_addr; - int status = inw(SEEQ_STATUS); - int transmit_ptr = 0; - unsigned long tmp; - - if (net_debug>4) { - printk("%s: send 0x%04x\n",dev->name,length); - } - - /* Set FIFO to writemode and set packet-buffer address */ - outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - outw( transmit_ptr, SEEQ_DMAAR); - - /* output SEEQ Packet header barfage */ - outw( htons(length + 4), SEEQ_BUFFER); - outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER ); - - /* blat the buffer */ - outsw( SEEQ_BUFFER, buf, (length +1) >> 1); - /* paranoia !! */ - outw( 0, SEEQ_BUFFER); - outw( 0, SEEQ_BUFFER); - - /* set address of start of transmit chain */ - outw( transmit_ptr, SEEQ_TPR); - - /* drain FIFO */ - tmp = jiffies; - while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ)) - mb(); - - /* doit ! 
*/ - outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - -} - - -#ifdef MODULE - -static struct net_device *dev_seeq; -MODULE_LICENSE("GPL"); -module_param(io, int, 0); -module_param(irq, int, 0); -MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); -MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); - -int __init init_module(void) -{ - dev_seeq = seeq8005_probe(-1); - return PTR_RET(dev_seeq); -} - -void __exit cleanup_module(void) -{ - unregister_netdev(dev_seeq); - release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT); - free_netdev(dev_seeq); -} - -#endif /* MODULE */ diff --git a/drivers/net/ethernet/seeq/seeq8005.h b/drivers/net/ethernet/seeq/seeq8005.h deleted file mode 100644 index 5dfb0098c6ca..000000000000 --- a/drivers/net/ethernet/seeq/seeq8005.h +++ /dev/null @@ -1,156 +0,0 @@ -/* - * defines, etc for the seeq8005 - */ - -/* - * This file is distributed under GPL. - * - * This style and layout of this file is also copied - * from many of the other linux network device drivers. - */ - -/* The number of low I/O ports used by the ethercard. */ -#define SEEQ8005_IO_EXTENT 16 - -#define SEEQ_B (ioaddr) - -#define SEEQ_CMD (SEEQ_B) /* Write only */ -#define SEEQ_STATUS (SEEQ_B) /* Read only */ -#define SEEQ_CFG1 (SEEQ_B + 2) -#define SEEQ_CFG2 (SEEQ_B + 4) -#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */ -#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */ -#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */ -#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */ -#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */ - -#define DEFAULT_TEA (0x3f) - -#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */ -#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */ -#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */ -#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? 
*/ -#define SEEQCMD_INT_MASK (0x000f) - -#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */ -#define SEEQCMD_RX_INT_ACK (0x0020) -#define SEEQCMD_TX_INT_ACK (0x0040) -#define SEEQCMD_WINDOW_INT_ACK (0x0080) -#define SEEQCMD_ACK_ALL (0x00f0) - -#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */ -#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */ -#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */ -#define SEEQCMD_SET_DMA_OFF (0x0800) -#define SEEQCMD_SET_RX_OFF (0x1000) -#define SEEQCMD_SET_TX_OFF (0x2000) -#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */ - -#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */ -#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */ - -#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */ -#define SEEQSTAT_RX_INT_EN (0x0002) -#define SEEQSTAT_TX_INT_EN (0x0004) -#define SEEQSTAT_WINDOW_INT_EN (0x0008) - -#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */ -#define SEEQSTAT_RX_INT (0x0020) -#define SEEQSTAT_TX_INT (0x0040) -#define SEEQSTAT_WINDOW_INT (0x0080) -#define SEEQSTAT_ANY_INT (0x00f0) - -#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */ -#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */ -#define SEEQSTAT_TX_ON (0x0400) /* TX running */ - -#define SEEQSTAT_FIFO_FULL (0x2000) -#define SEEQSTAT_FIFO_EMPTY (0x4000) -#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */ - -#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */ -#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */ -#define SEEQCFG1_BUFFER_MAC1 (0x0001) -#define SEEQCFG1_BUFFER_MAC2 (0x0002) -#define SEEQCFG1_BUFFER_MAC3 (0x0003) -#define SEEQCFG1_BUFFER_MAC4 (0x0004) -#define SEEQCFG1_BUFFER_MAC5 (0x0005) -#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */ -#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */ -#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */ -#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */ - -#define SEEQCFG1_DMA_INTVL_MASK (0x0030) -#define SEEQCFG1_DMA_CONT (0x0000) -#define SEEQCFG1_DMA_800ns (0x0010) -#define SEEQCFG1_DMA_1600ns (0x0020) -#define SEEQCFG1_DMA_3200ns (0x0030) - -#define SEEQCFG1_DMA_LEN_MASK (0x00c0) -#define SEEQCFG1_DMA_LEN1 (0x0000) -#define SEEQCFG1_DMA_LEN2 (0x0040) -#define SEEQCFG1_DMA_LEN4 (0x0080) -#define SEEQCFG1_DMA_LEN8 (0x00c0) - -#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */ -#define SEEQCFG1_MAC0_EN (0x0100) -#define SEEQCFG1_MAC1_EN (0x0200) -#define SEEQCFG1_MAC2_EN (0x0400) -#define SEEQCFG1_MAC3_EN (0x0800) -#define SEEQCFG1_MAC4_EN (0x1000) -#define SEEQCFG1_MAC5_EN (0x2000) - -#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */ -#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */ -#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */ -#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */ -#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */ - -#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD) - -#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */ -#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */ - -#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */ -#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */ -#define 
SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */ - -#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */ -#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */ -#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */ -#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */ -#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */ -#define SEEQCFG2_LOOPBACK (0x0800) -#define SEEQCFG2_CTRLO (0x1000) -#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */ - -struct seeq_pkt_hdr { - unsigned short next; /* address of next packet header */ - unsigned char babble_int:1, /* enable int on >1514 byte packet */ - coll_int:1, /* enable int on collision */ - coll_16_int:1, /* enable int on >15 collision */ - xmit_int:1, /* enable int on success (or xmit with <15 collision) */ - unused:1, - data_follows:1, /* if not set, process this as a header and pointer only */ - chain_cont:1, /* if set, more headers in chain only cmd bit valid in recv header */ - xmit_recv:1; /* if set, a xmit packet, else a receive packet.*/ - unsigned char status; -}; - -#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */ -#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */ -#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */ -#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */ -#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */ -#define SEEQPKTH_CHAIN (0x40) /* more headers follow */ -#define SEEQPKTH_XMIT (0x80) - -#define SEEQPKTS_BABBLE (0x0100) /* xmit only */ -#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */ -#define SEEQPKTS_COLLISION (0x0200) /* xmit only */ -#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */ -#define SEEQPKTS_COLL16 (0x0400) /* xmit only */ -#define SEEQPKTS_DRIB (0x0400) /* recv only */ -#define SEEQPKTS_SHORT (0x0800) /* recv only */ -#define SEEQPKTS_DONE (0x8000) -#define SEEQPKTS_ANY_ERROR (0x0f00) diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 0767043f44a4..3f93624fc273 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1439,7 +1439,7 @@ static int efx_phc_settime(struct ptp_clock_info *ptp, delta = timespec_sub(*e_ts, time_now); - efx_phc_adjtime(ptp, timespec_to_ns(&delta)); + rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta)); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index dc171b4961e4..7ed08c32a9c5 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1565,9 +1565,9 @@ static void ioc3_get_drvinfo (struct net_device *dev, { struct ioc3_private *ip = netdev_priv(dev); - strcpy (info->driver, IOC3_NAME); - strcpy (info->version, IOC3_VERSION); - strcpy (info->bus_info, pci_name(ip->pdev)); + strlcpy(info->driver, IOC3_NAME, sizeof(info->driver)); + strlcpy(info->version, IOC3_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info)); } static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index b2315324cc6d..28f7268f1b88 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -1458,12 +1458,12 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id) mac0 = ioread32(port_base + MAC0); mac1 = ioread32(port_base + MAC0 
+ 4); - dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24; - dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16; - dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8; - dev->dev_addr[3] = dev->perm_addr[3] = mac0; - dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8; - dev->dev_addr[5] = dev->perm_addr[5] = mac1; + dev->dev_addr[0] = mac0 >> 24; + dev->dev_addr[1] = mac0 >> 16; + dev->dev_addr[2] = mac0 >> 8; + dev->dev_addr[3] = mac0; + dev->dev_addr[4] = mac1 >> 8; + dev->dev_addr[5] = mac1; err = register_netdev(dev); if (err < 0) diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 5bffd9749a58..efca14eaefa9 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -247,8 +247,7 @@ static const struct ethtool_ops sis900_ethtool_ops; * @net_dev: the net device to get address for * * Older SiS900 and friends, use EEPROM to store MAC address. - * MAC address is read from read_eeprom() into @net_dev->dev_addr and - * @net_dev->perm_addr. + * MAC address is read from read_eeprom() into @net_dev->dev_addr. */ static int sis900_get_mac_addr(struct pci_dev *pci_dev, @@ -271,9 +270,6 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev, for (i = 0; i < 3; i++) ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); - /* Store MAC Address in perm_addr */ - memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); - return 1; } @@ -284,8 +280,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev, * * SiS630E model, use APC CMOS RAM to store MAC address. * APC CMOS RAM is accessed through ISA bridge. - * MAC address is read into @net_dev->dev_addr and - * @net_dev->perm_addr. + * MAC address is read into @net_dev->dev_addr. */ static int sis630e_get_mac_addr(struct pci_dev *pci_dev, @@ -311,9 +306,6 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev, ((u8 *)(net_dev->dev_addr))[i] = inb(0x71); } - /* Store MAC Address in perm_addr */ - memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); - pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40); pci_dev_put(isa_bridge); @@ -328,7 +320,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev, * * SiS635 model, set MAC Reload Bit to load Mac address from APC * to rfdr. rfdr is accessed through rfcr. MAC address is read into - * @net_dev->dev_addr and @net_dev->perm_addr. + * @net_dev->dev_addr. */ static int sis635_get_mac_addr(struct pci_dev *pci_dev, @@ -353,9 +345,6 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev, *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr); } - /* Store MAC Address in perm_addr */ - memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); - /* enable packet filtering */ sw32(rfcr, rfcrSave | RFEN); @@ -375,7 +364,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev, * EEDONE signal to refuse EEPROM access by LAN. * The EEPROM map of SiS962 or SiS963 is different to SiS900. * The signature field in SiS962 or SiS963 spec is meaningless. - * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr. + * MAC address is read into @net_dev->dev_addr. 
*/ static int sis96x_get_mac_addr(struct pci_dev *pci_dev, @@ -395,9 +384,6 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev, for (i = 0; i < 3; i++) mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); - /* Store MAC Address in perm_addr */ - memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); - rc = 1; break; } diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 59a6f88da867..9dd842dbb859 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1522,9 +1522,10 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) static void smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strncpy(info->driver, CARDNAME, sizeof(info->driver)); - strncpy(info->version, version, sizeof(info->version)); - strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); + strlcpy(info->driver, CARDNAME, sizeof(info->driver)); + strlcpy(info->version, version, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); } static int smc911x_ethtool_nwayreset(struct net_device *dev) @@ -2035,7 +2036,7 @@ static int smc911x_drv_probe(struct platform_device *pdev) struct net_device *ndev; struct resource *res; struct smc911x_local *lp; - unsigned int *addr; + void __iomem *addr; int ret; DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index a670d23d9340..591650a8de38 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1597,9 +1597,10 @@ smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) static void smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strncpy(info->driver, CARDNAME, sizeof(info->driver)); - strncpy(info->version, version, sizeof(info->version)); - strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); + strlcpy(info->driver, CARDNAME, sizeof(info->driver)); + strlcpy(info->version, version, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); } static int smc_ethtool_nwayreset(struct net_device *dev) diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index e112877d15d3..da5cc9a3b34c 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -997,9 +997,8 @@ static int smsc911x_mii_probe(struct net_device *dev) SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", phydev->addr, phydev->phy_id); - ret = phy_connect_direct(dev, phydev, - &smsc911x_phy_adjust_link, 0, - pdata->config.phy_interface); + ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link, + pdata->config.phy_interface); if (ret) { netdev_err(dev, "Could not attach to PHY\n"); @@ -1831,7 +1830,6 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); spin_lock_irq(&pdata->mac_lock); diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 3c586585e1b3..d457fa2d7509 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1179,7 +1179,7 @@ static int smsc9420_mii_probe(struct net_device *dev) phydev->phy_id); phydev = phy_connect(dev, dev_name(&phydev->dev), - 
smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); + smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { pr_err("%s: Could not attach to PHY\n", dev->name); @@ -1250,12 +1250,11 @@ static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd) BUG_ON(!pd->tx_ring); - pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) * - TX_RING_SIZE), GFP_KERNEL); - if (!pd->tx_buffers) { - smsc_warn(IFUP, "Failed to allocated tx_buffers"); + pd->tx_buffers = kmalloc_array(TX_RING_SIZE, + sizeof(struct smsc9420_ring_info), + GFP_KERNEL); + if (!pd->tx_buffers) return -ENOMEM; - } /* Initialize the TX Ring */ for (i = 0; i < TX_RING_SIZE; i++) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 1372ce210b58..d1ac39c1b05d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -210,8 +210,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev, strlcpy(info->driver, MAC100_ETHTOOL_NAME, sizeof(info->driver)); - strcpy(info->version, DRV_MODULE_VERSION); - info->fw_version[0] = '\0'; + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int stmmac_ethtool_getsettings(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b75f4b286895..39c6c5524633 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -428,8 +428,7 @@ static int stmmac_init_phy(struct net_device *dev) priv->plat->phy_addr); pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt); - phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0, - interface); + phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface); if (IS_ERR(phydev)) { pr_err("%s: Could not attach to PHY\n", dev->name); @@ -531,17 +530,18 @@ static void init_dma_desc_rings(struct net_device *dev) DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", txsize, rxsize, bfsize); - priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL); - priv->rx_skbuff = - kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL); + priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), + GFP_KERNEL); + priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), + GFP_KERNEL); priv->dma_rx = (struct dma_desc *)dma_alloc_coherent(priv->device, rxsize * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); - priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize, - GFP_KERNEL); + priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), + GFP_KERNEL); priv->dma_tx = (struct dma_desc *)dma_alloc_coherent(priv->device, txsize * @@ -2254,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str) } else if (!strncmp(opt, "pause:", 6)) { if (kstrtoint(opt + 6, 0, &pause)) goto err; - } else if (!strncmp(opt, "eee_timer:", 6)) { + } else if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 064eaac9616f..19b3a2567a46 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -102,6 +102,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed", 
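The allocation changes in this hunk (and in the niu, cpsw and ll_temac hunks later on) swap open-coded `kmalloc(n * size)` for kmalloc_array()/kcalloc(), which fail cleanly if the multiplication would overflow, and they drop the explicit out-of-memory messages since the allocator already warns. Roughly, with foo_ring_info standing in for whatever per-descriptor bookkeeping type a driver uses:

	/* Illustrative sketch; foo_ring_info is a placeholder type. */
	#include <linux/slab.h>
	#include <linux/types.h>

	struct foo_ring_info {
		void		*skb;
		dma_addr_t	mapping;
	};

	static struct foo_ring_info *foo_alloc_ring(unsigned int entries, gfp_t gfp)
	{
		/* overflow-checked equivalent of kmalloc(entries * sizeof(...));
		 * kcalloc() would additionally zero the array */
		return kmalloc_array(entries, sizeof(struct foo_ring_info), gfp);
	}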
__func__); + ret = -ENODEV; goto err_out; } priv->dev->irq = pdev->irq; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index a0bdf0779466..e4c1c88e4c2a 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -4342,7 +4342,7 @@ static int niu_alloc_rx_ring_info(struct niu *np, { BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); - rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), + rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), GFP_KERNEL); if (!rp->rxhash) return -ENOMEM; @@ -8366,14 +8366,12 @@ static void niu_pci_vpd_validate(struct niu *np) return; } - memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); + memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN); - val8 = dev->perm_addr[5]; - dev->perm_addr[5] += np->port; - if (dev->perm_addr[5] < val8) - dev->perm_addr[4]++; - - memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + val8 = dev->dev_addr[5]; + dev->dev_addr[5] += np->port; + if (dev->dev_addr[5] < val8) + dev->dev_addr[4]++; } static int niu_pci_probe_sprom(struct niu *np) @@ -8470,29 +8468,27 @@ static int niu_pci_probe_sprom(struct niu *np) val = nr64(ESPC_MAC_ADDR0); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); - dev->perm_addr[0] = (val >> 0) & 0xff; - dev->perm_addr[1] = (val >> 8) & 0xff; - dev->perm_addr[2] = (val >> 16) & 0xff; - dev->perm_addr[3] = (val >> 24) & 0xff; + dev->dev_addr[0] = (val >> 0) & 0xff; + dev->dev_addr[1] = (val >> 8) & 0xff; + dev->dev_addr[2] = (val >> 16) & 0xff; + dev->dev_addr[3] = (val >> 24) & 0xff; val = nr64(ESPC_MAC_ADDR1); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); - dev->perm_addr[4] = (val >> 0) & 0xff; - dev->perm_addr[5] = (val >> 8) & 0xff; + dev->dev_addr[4] = (val >> 0) & 0xff; + dev->dev_addr[5] = (val >> 8) & 0xff; - if (!is_valid_ether_addr(&dev->perm_addr[0])) { + if (!is_valid_ether_addr(&dev->dev_addr[0])) { dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", - dev->perm_addr); + dev->dev_addr); return -EINVAL; } - val8 = dev->perm_addr[5]; - dev->perm_addr[5] += np->port; - if (dev->perm_addr[5] < val8) - dev->perm_addr[4]++; - - memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + val8 = dev->dev_addr[5]; + dev->dev_addr[5] += np->port; + if (dev->dev_addr[5] < val8) + dev->dev_addr[4]++; val = nr64(ESPC_MOD_STR_LEN); netif_printk(np, probe, KERN_DEBUG, np->dev, @@ -9267,16 +9263,14 @@ static int niu_get_of_props(struct niu *np) netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", dp->full_name, prop_len); } - memcpy(dev->perm_addr, mac_addr, dev->addr_len); - if (!is_valid_ether_addr(&dev->perm_addr[0])) { + memcpy(dev->dev_addr, mac_addr, dev->addr_len); + if (!is_valid_ether_addr(&dev->dev_addr[0])) { netdev_err(dev, "%s: OF MAC address is invalid\n", dp->full_name); - netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr); + netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr); return -EINVAL; } - memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); - model = of_get_property(dp, "model", &prop_len); if (model) diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index be82f6d13c51..5fafca065305 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1042,8 +1042,8 @@ static void bigmac_set_multicast(struct net_device *dev) /* Ethtool support... 
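The get_drvinfo() conversions scattered through this series (sunbmac and sunqe just below, then tehuti, cpmac, tc35815, spider_net, davinci_emac, cpsw and others) replace strcpy()/strncpy()/strlcat() with strlcpy(): the ethtool_drvinfo fields are fixed-size arrays, and strlcpy() both bounds the copy and guarantees NUL termination, which strncpy() does not. A sketch of the resulting handler shape, with placeholder driver and version strings:

	/* Sketch; "foo" and "1.0" are placeholders. */
	#include <linux/device.h>
	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static void foo_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
	{
		strlcpy(info->driver, "foo", sizeof(info->driver));
		strlcpy(info->version, "1.0", sizeof(info->version));
		/* bus_info goes through snprintf() when it needs formatting */
		snprintf(info->bus_info, sizeof(info->bus_info), "platform:%s",
			 dev_name(dev->dev.parent));
	}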
*/ static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, "sunbmac"); - strcpy(info->version, "2.0"); + strlcpy(info->driver, "sunbmac", sizeof(info->driver)); + strlcpy(info->version, "2.0", sizeof(info->version)); } static u32 bigmac_get_link(struct net_device *dev) diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 1dcee6915843..49bf3e2eb652 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -685,13 +685,14 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) struct sunqe *qep = netdev_priv(dev); struct platform_device *op; - strcpy(info->driver, "sunqe"); - strcpy(info->version, "3.0"); + strlcpy(info->driver, "sunqe", sizeof(info->driver)); + strlcpy(info->version, "3.0", sizeof(info->version)); op = qep->op; regs = of_get_property(op->dev.of_node, "reg", NULL); if (regs) - sprintf(info->bus_info, "SBUS:%d", regs->which_io); + snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d", + regs->which_io); } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index e1b895530827..289b4eefb42f 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -882,8 +882,8 @@ static int vnet_set_mac_addr(struct net_device *dev, void *p) static void vnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static u32 vnet_get_msglevel(struct net_device *dev) @@ -1032,8 +1032,6 @@ static struct vnet *vnet_new(const u64 *local_mac) for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - vp = netdev_priv(dev); spin_lock_init(&vp->lock); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 1e4d743ff03e..e15cc71b826d 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2179,10 +2179,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct bdx_priv *priv = netdev_priv(netdev); - strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver)); - strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version)); - strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strlcat(drvinfo->bus_info, pci_name(priv->pdev), + strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(priv->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = ((priv->stats_flag) ? 
ARRAY_SIZE(bdx_stat_names) : 0); diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index d9625f62b026..31bbbca341a7 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -904,10 +904,9 @@ static int cpmac_set_ringparam(struct net_device *dev, static void cpmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, "cpmac"); - strcpy(info->version, CPMAC_VERSION); - info->fw_version[0] = '\0'; - sprintf(info->bus_info, "%s", "cpmac"); + strlcpy(info->driver, "cpmac", sizeof(info->driver)); + strlcpy(info->version, CPMAC_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); info->regdump_len = 0; } @@ -1173,8 +1172,8 @@ static int cpmac_probe(struct platform_device *pdev) snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); - priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, + PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phy)) { if (netif_msg_drv(priv)) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 40aff684aa23..7e93df6585e7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -32,6 +32,7 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_device.h> +#include <linux/if_vlan.h> #include <linux/platform_data/cpsw.h> @@ -118,6 +119,13 @@ do { \ #define TX_PRIORITY_MAPPING 0x33221100 #define CPDMA_TX_PRIORITY_MAP 0x76543210 +#define CPSW_VLAN_AWARE BIT(1) +#define CPSW_ALE_VLAN_AWARE 1 + +#define CPSW_FIFO_NORMAL_MODE (0 << 15) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) + #define cpsw_enable_irq(priv) \ do { \ u32 i; \ @@ -250,7 +258,7 @@ struct cpsw_ss_regs { struct cpsw_host_regs { u32 max_blks; u32 blk_cnt; - u32 flow_thresh; + u32 tx_in_ctl; u32 port_vlan; u32 tx_pri_map; u32 cpdma_tx_pri_map; @@ -277,6 +285,9 @@ struct cpsw_slave { u32 mac_control; struct cpsw_slave_data *data; struct phy_device *phy; + struct net_device *ndev; + u32 port_vlan; + u32 open_stat; }; static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) @@ -315,17 +326,65 @@ struct cpsw_priv { /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; - struct cpts cpts; + struct cpts *cpts; + u32 emac_port; }; #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) -#define for_each_slave(priv, func, arg...) \ - do { \ - int idx; \ - for (idx = 0; idx < (priv)->data.slaves; idx++) \ - (func)((priv)->slaves + idx, ##arg); \ +#define for_each_slave(priv, func, arg...) \ + do { \ + int idx; \ + if (priv->data.dual_emac) \ + (func)((priv)->slaves + priv->emac_port, ##arg);\ + else \ + for (idx = 0; idx < (priv)->data.slaves; idx++) \ + (func)((priv)->slaves + idx, ##arg); \ + } while (0) +#define cpsw_get_slave_ndev(priv, __slave_no__) \ + (priv->slaves[__slave_no__].ndev) +#define cpsw_get_slave_priv(priv, __slave_no__) \ + ((priv->slaves[__slave_no__].ndev) ? 
\ + netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ + +#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ + do { \ + if (!priv->data.dual_emac) \ + break; \ + if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ + ndev = cpsw_get_slave_ndev(priv, 0); \ + priv = netdev_priv(ndev); \ + skb->dev = ndev; \ + } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ + ndev = cpsw_get_slave_ndev(priv, 1); \ + priv = netdev_priv(ndev); \ + skb->dev = ndev; \ + } \ + } while (0) +#define cpsw_add_mcast(priv, addr) \ + do { \ + if (priv->data.dual_emac) { \ + struct cpsw_slave *slave = priv->slaves + \ + priv->emac_port; \ + int slave_port = cpsw_get_slave_port(priv, \ + slave->slave_num); \ + cpsw_ale_add_mcast(priv->ale, addr, \ + 1 << slave_port | 1 << priv->host_port, \ + ALE_VLAN, slave->port_vlan, 0); \ + } else { \ + cpsw_ale_add_mcast(priv->ale, addr, \ + ALE_ALL_PORTS << priv->host_port, \ + 0, 0, 0); \ + } \ } while (0) +static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) +{ + if (priv->host_port == 0) + return slave_num + 1; + else + return slave_num; +} + static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -344,8 +403,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* program multicast address list into ALE register */ netdev_for_each_mc_addr(ha, ndev) { - cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr, - ALE_ALL_PORTS << priv->host_port, 0, 0); + cpsw_add_mcast(priv, (u8 *)ha->addr); } } } @@ -374,9 +432,12 @@ void cpsw_tx_handler(void *token, int len, int status) struct net_device *ndev = skb->dev; struct cpsw_priv *priv = netdev_priv(ndev); + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ if (unlikely(netif_queue_stopped(ndev))) netif_start_queue(ndev); - cpts_tx_timestamp(&priv->cpts, skb); + cpts_tx_timestamp(priv->cpts, skb); priv->stats.tx_packets++; priv->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -389,6 +450,8 @@ void cpsw_rx_handler(void *token, int len, int status) struct cpsw_priv *priv = netdev_priv(ndev); int ret = 0; + cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); + /* free and bail if we are shutting down */ if (unlikely(!netif_running(ndev)) || unlikely(!netif_carrier_ok(ndev))) { @@ -397,7 +460,7 @@ void cpsw_rx_handler(void *token, int len, int status) } if (likely(status >= 0)) { skb_put(skb, len); - cpts_rx_timestamp(&priv->cpts, skb); + cpts_rx_timestamp(priv->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); priv->stats.rx_bytes += len; @@ -417,7 +480,7 @@ void cpsw_rx_handler(void *token, int len, int status) return; ret = cpdma_chan_submit(priv->rxch, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); + skb_tailroom(skb), 0, GFP_KERNEL); } WARN_ON(ret < 0); } @@ -430,37 +493,38 @@ static irqreturn_t cpsw_interrupt(int irq, void *dev_id) cpsw_intr_disable(priv); cpsw_disable_irq(priv); napi_schedule(&priv->napi); + } else { + priv = cpsw_get_slave_priv(priv, 1); + if (likely(priv) && likely(netif_running(priv->ndev))) { + cpsw_intr_disable(priv); + cpsw_disable_irq(priv); + napi_schedule(&priv->napi); + } } return IRQ_HANDLED; } -static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) -{ - if (priv->host_port == 0) - return slave_num + 1; - else - return slave_num; -} - static int cpsw_poll(struct napi_struct *napi, int budget) { struct cpsw_priv *priv = napi_to_priv(napi); int num_tx, num_rx; num_tx = 
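For the dual EMAC receive path introduced above, the CPDMA completion status now carries the ingress port number in bits 18:16 (the CPDMA_RX_SOURCE_PORT() macro added to davinci_cpdma.h later in this patch), and cpsw_dual_emac_src_port_detect() uses it to retarget the skb at the right slave net_device. The bit extraction, written out as a plain helper for readability (the name is illustrative):

	/* Equivalent of CPDMA_RX_SOURCE_PORT(status), for illustration only. */
	static inline int foo_rx_source_port(int status)
	{
		/* bits 18:16 of the CPDMA completion status: ingress port, 1 or 2 */
		return (status >> 16) & 0x7;
	}

The caller then maps port 1 to slaves[0] and port 2 to slaves[1], rewrites skb->dev and switches to that slave's private data, exactly as the macro does.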
cpdma_chan_process(priv->txch, 128); - num_rx = cpdma_chan_process(priv->rxch, budget); - - if (num_rx || num_tx) - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", - num_rx, num_tx); + if (num_tx) + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { napi_complete(napi); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); cpsw_enable_irq(priv); } + if (num_rx || num_tx) + cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", + num_rx, num_tx); + return num_rx; } @@ -559,6 +623,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) leader + strlen(name), val); } +static int cpsw_common_res_usage_state(struct cpsw_priv *priv) +{ + u32 i; + u32 usage_count = 0; + + if (!priv->data.dual_emac) + return 0; + + for (i = 0; i < priv->data.slaves; i++) + if (priv->slaves[i].open_stat) + usage_count++; + + return usage_count; +} + +static inline int cpsw_tx_packet_submit(struct net_device *ndev, + struct cpsw_priv *priv, struct sk_buff *skb) +{ + if (!priv->data.dual_emac) + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 0, GFP_KERNEL); + + if (ndev == cpsw_get_slave_ndev(priv, 0)) + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 1, GFP_KERNEL); + else + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 2, GFP_KERNEL); +} + +static inline void cpsw_add_dual_emac_def_ale_entries( + struct cpsw_priv *priv, struct cpsw_slave *slave, + u32 slave_port) +{ + u32 port_mask = 1 << slave_port | 1 << priv->host_port; + + if (priv->version == CPSW_VERSION_1) + slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); + else + slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN); + cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask, + port_mask, port_mask, 0); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, slave->port_vlan, 0); + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, slave->port_vlan); +} + static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) { char name[32]; @@ -588,11 +700,14 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave_port = cpsw_get_slave_port(priv, slave->slave_num); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << slave_port, 0, ALE_MCAST_FWD_2); + if (priv->data.dual_emac) + cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); + else + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); slave->phy = phy_connect(priv->ndev, slave->data->phy_id, - &cpsw_adjust_link, 0, slave->data->phy_if); + &cpsw_adjust_link, slave->data->phy_if); if (IS_ERR(slave->phy)) { dev_err(priv->dev, "phy %s not found on slave %d\n", slave->data->phy_id, slave->slave_num); @@ -604,14 +719,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } } +static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) +{ + const int vlan = priv->data.default_vlan; + const int port = priv->host_port; + u32 reg; + int i; + + reg = (priv->version == CPSW_VERSION_1) ? 
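cpsw_tx_packet_submit() above is where the new 'directed' argument of cpdma_chan_submit() gets used: 0 preserves the old switch-decides behaviour, while 1 or 2 pins the frame to a specific slave port in dual EMAC mode (the descriptor bits themselves are set in the davinci_cpdma.c hunk further down). A hedged sketch of a transmit helper built on that, assuming only the cpdma API as changed by this patch:

	/* Sketch; foo_tx_submit() is not part of the patch. */
	#include <linux/gfp.h>
	#include <linux/skbuff.h>
	#include <linux/types.h>
	#include "davinci_cpdma.h"	/* cpdma_chan_submit() */

	static int foo_tx_submit(struct cpdma_chan *txch, struct sk_buff *skb,
				 bool dual_emac, int slave_port /* 1 or 2 */)
	{
		/* 0 = let the switch choose the egress port; 1 or 2 = force that slave */
		int directed = dual_emac ? slave_port : 0;

		return cpdma_chan_submit(txch, skb, skb->data, skb->len,
					 directed, GFP_KERNEL);
	}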
CPSW1_PORT_VLAN : + CPSW2_PORT_VLAN; + + writel(vlan, &priv->host_port_regs->port_vlan); + + for (i = 0; i < 2; i++) + slave_write(priv->slaves + i, vlan, reg); + + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, + ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, + (ALE_PORT_1 | ALE_PORT_2) << port); +} + static void cpsw_init_host_port(struct cpsw_priv *priv) { + u32 control_reg; + u32 fifo_mode; + /* soft reset the controller and initialize ale */ soft_reset("cpsw", &priv->regs->soft_reset); cpsw_ale_start(priv->ale); /* switch to vlan unaware mode */ - cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0); + cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE, + CPSW_ALE_VLAN_AWARE); + control_reg = readl(&priv->regs->control); + control_reg |= CPSW_VLAN_AWARE; + writel(control_reg, &priv->regs->control); + fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE : + CPSW_FIFO_NORMAL_MODE; + writel(fifo_mode, &priv->host_port_regs->tx_in_ctl); /* setup host port priority mapping */ __raw_writel(CPDMA_TX_PRIORITY_MAP, @@ -621,9 +766,12 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) cpsw_ale_control_set(priv->ale, priv->host_port, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << priv->host_port, 0, ALE_MCAST_FWD_2); + if (!priv->data.dual_emac) { + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, + 0, 0); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2); + } } static int cpsw_ndo_open(struct net_device *ndev) @@ -632,7 +780,8 @@ static int cpsw_ndo_open(struct net_device *ndev) int i, ret; u32 reg; - cpsw_intr_disable(priv); + if (!cpsw_common_res_usage_state(priv)) + cpsw_intr_disable(priv); netif_carrier_off(ndev); pm_runtime_get_sync(&priv->pdev->dev); @@ -644,43 +793,55 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_RTL_VERSION(reg)); /* initialize host and slave ports */ - cpsw_init_host_port(priv); + if (!cpsw_common_res_usage_state(priv)) + cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); - /* setup tx dma to fixed prio and zero offset */ - cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); - cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); + /* Add default VLAN */ + if (!priv->data.dual_emac) + cpsw_add_default_vlan(priv); - /* disable priority elevation and enable statistics on all ports */ - __raw_writel(0, &priv->regs->ptype); + if (!cpsw_common_res_usage_state(priv)) { + /* setup tx dma to fixed prio and zero offset */ + cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); + cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); - /* enable statistics collection only on the host port */ - __raw_writel(0x7, &priv->regs->stat_port_en); + /* disable priority elevation */ + __raw_writel(0, &priv->regs->ptype); - if (WARN_ON(!priv->data.rx_descs)) - priv->data.rx_descs = 128; + /* enable statistics collection only on all ports */ + __raw_writel(0x7, &priv->regs->stat_port_en); - for (i = 0; i < priv->data.rx_descs; i++) { - struct sk_buff *skb; + if (WARN_ON(!priv->data.rx_descs)) + priv->data.rx_descs = 128; - ret = -ENOMEM; - skb = netdev_alloc_skb_ip_align(priv->ndev, - priv->rx_packet_max); - if (!skb) - break; - ret = cpdma_chan_submit(priv->rxch, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); - if (WARN_ON(ret < 0)) - break; + for (i = 0; i < priv->data.rx_descs; i++) { + struct sk_buff *skb; + + ret = 
-ENOMEM; + skb = netdev_alloc_skb_ip_align(priv->ndev, + priv->rx_packet_max); + if (!skb) + break; + ret = cpdma_chan_submit(priv->rxch, skb, skb->data, + skb_tailroom(skb), 0, GFP_KERNEL); + if (WARN_ON(ret < 0)) + break; + } + /* continue even if we didn't manage to submit all + * receive descs + */ + cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); } - /* continue even if we didn't manage to submit all receive descs */ - cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); napi_enable(&priv->napi); - cpdma_ctlr_eoi(priv->dma); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + if (priv->data.dual_emac) + priv->slaves[priv->emac_port].open_stat = true; return 0; } @@ -701,12 +862,17 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_stop_queue(priv->ndev); napi_disable(&priv->napi); netif_carrier_off(priv->ndev); - cpsw_intr_disable(priv); - cpdma_ctlr_int_ctrl(priv->dma, false); - cpdma_ctlr_stop(priv->dma); - cpsw_ale_stop(priv->ale); + + if (cpsw_common_res_usage_state(priv) <= 1) { + cpsw_intr_disable(priv); + cpdma_ctlr_int_ctrl(priv->dma, false); + cpdma_ctlr_stop(priv->dma); + cpsw_ale_stop(priv->ale); + } for_each_slave(priv, cpsw_slave_stop, priv); pm_runtime_put_sync(&priv->pdev->dev); + if (priv->data.dual_emac) + priv->slaves[priv->emac_port].open_stat = false; return 0; } @@ -724,18 +890,24 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable) + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + priv->cpts->tx_enable) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_tx_timestamp(skb); - ret = cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, GFP_KERNEL); + ret = cpsw_tx_packet_submit(ndev, priv, skb); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); goto fail; } + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. 
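Both cpsw (here) and davinci_emac (later in the diff) converge on the standard TX flow-control idiom: ndo_start_xmit() stops the queue once the DMA descriptor pool cannot take another frame, and the TX completion handler restarts it when descriptors are reclaimed. The general shape, with hypothetical foo_ helpers standing in for the driver-specific queueing call and ring-full test:

	/* Outline only; foo_queue_skb_for_dma() and foo_tx_ring_full() are made up. */
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int foo_queue_skb_for_dma(struct net_device *ndev, struct sk_buff *skb);
	static bool foo_tx_ring_full(struct net_device *ndev);

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	{
		if (foo_queue_skb_for_dma(ndev, skb) < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		/* no descriptors left: tell the stack to hold off */
		if (unlikely(foo_tx_ring_full(ndev)))
			netif_stop_queue(ndev);

		return NETDEV_TX_OK;
	}

	static void foo_tx_complete(struct net_device *ndev)
	{
		/* descriptors were reclaimed; let the stack transmit again */
		if (unlikely(netif_queue_stopped(ndev)))
			netif_start_queue(ndev);
	}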
+ */ + if (unlikely(cpdma_check_free_tx_desc(priv->txch))) + netif_stop_queue(ndev); + return NETDEV_TX_OK; fail: priv->stats.tx_dropped++; @@ -773,7 +945,7 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; u32 ts_en, seq_id; - if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) { + if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { slave_write(slave, 0, CPSW1_TS_CTL); return; } @@ -781,10 +953,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - if (priv->cpts.tx_enable) + if (priv->cpts->tx_enable) ts_en |= CPSW_V1_TS_TX_EN; - if (priv->cpts.rx_enable) + if (priv->cpts->rx_enable) ts_en |= CPSW_V1_TS_RX_EN; slave_write(slave, ts_en, CPSW1_TS_CTL); @@ -793,16 +965,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) { - struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; + struct cpsw_slave *slave; u32 ctrl, mtype; + if (priv->data.dual_emac) + slave = &priv->slaves[priv->emac_port]; + else + slave = &priv->slaves[priv->data.cpts_active_slave]; + ctrl = slave_read(slave, CPSW2_CONTROL); ctrl &= ~CTRL_ALL_TS_MASK; - if (priv->cpts.tx_enable) + if (priv->cpts->tx_enable) ctrl |= CTRL_TX_TS_BITS; - if (priv->cpts.rx_enable) + if (priv->cpts->rx_enable) ctrl |= CTRL_RX_TS_BITS; mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; @@ -815,7 +992,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) { struct cpsw_priv *priv = netdev_priv(dev); - struct cpts *cpts = &priv->cpts; + struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -901,7 +1078,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) cpdma_chan_start(priv->txch); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + } static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) @@ -920,10 +1099,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) cpsw_interrupt(ndev->irq, priv); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + } #endif +static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, + unsigned short vid) +{ + int ret; + + ret = cpsw_ale_add_vlan(priv->ale, vid, + ALE_ALL_PORTS << priv->host_port, + 0, ALE_ALL_PORTS << priv->host_port, + (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); + if (ret != 0) + return ret; + + ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); + if (ret != 0) + goto clean_vid; + + ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + ALE_ALL_PORTS << priv->host_port, + ALE_VLAN, vid, 0); + if (ret != 0) + goto clean_vlan_ucast; + return 0; + +clean_vlan_ucast: + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); +clean_vid: + cpsw_ale_del_vlan(priv->ale, vid, 0); + return ret; +} + +static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + unsigned short vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + if (vid == priv->data.default_vlan) + return 0; + + dev_info(priv->dev, "Adding vlanid %d to vlan 
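cpsw_add_vlan_ale_entry() above uses the usual kernel rollback style: each ALE insertion that succeeds is undone in reverse order if a later step fails, so a half-programmed VLAN never stays in the table. The pattern in the abstract, with every step and undo function hypothetical:

	/* Abstract rollback pattern; all foo_ functions are placeholders. */
	static int foo_step_a(void);	/* e.g. add the VLAN entry */
	static int foo_step_b(void);	/* e.g. add the unicast entry */
	static int foo_step_c(void);	/* e.g. add the multicast entry */
	static void foo_undo_a(void);
	static void foo_undo_b(void);

	static int foo_setup_three_steps(void)
	{
		int ret;

		ret = foo_step_a();
		if (ret)
			return ret;
		ret = foo_step_b();
		if (ret)
			goto undo_a;
		ret = foo_step_c();
		if (ret)
			goto undo_b;
		return 0;

	undo_b:
		foo_undo_b();
	undo_a:
		foo_undo_a();
		return ret;
	}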
filter\n", vid); + return cpsw_add_vlan_ale_entry(priv, vid); +} + +static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, + unsigned short vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + if (vid == priv->data.default_vlan) + return 0; + + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); + ret = cpsw_ale_del_vlan(priv->ale, vid, 0); + if (ret != 0) + return ret; + + ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); + if (ret != 0) + return ret; + + return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); +} + static const struct net_device_ops cpsw_netdev_ops = { .ndo_open = cpsw_ndo_open, .ndo_stop = cpsw_ndo_stop, @@ -938,15 +1186,18 @@ static const struct net_device_ops cpsw_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cpsw_ndo_poll_controller, #endif + .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, }; static void cpsw_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct cpsw_priv *priv = netdev_priv(ndev); - strcpy(info->driver, "TI CPSW Driver v1.0"); - strcpy(info->version, "1.0"); - strcpy(info->bus_info, priv->pdev->name); + + strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver)); + strlcpy(info->version, "1.0", sizeof(info->version)); + strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); } static u32 cpsw_get_msglevel(struct net_device *ndev) @@ -974,7 +1225,7 @@ static int cpsw_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = priv->cpts.phc_index; + info->phc_index = priv->cpts->phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1011,6 +1262,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->data = data; slave->regs = regs + slave_reg_ofs; slave->sliver = regs + sliver_reg_ofs; + slave->port_vlan = data->dual_emac_res_vlan; } static int cpsw_probe_dt(struct cpsw_platform_data *data, @@ -1051,12 +1303,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->cpts_clock_shift = prop; - data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * - data->slaves, GFP_KERNEL); - if (!data->slave_data) { - pr_err("Could not allocate slave memory.\n"); + data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data), + GFP_KERNEL); + if (!data->slave_data) return -EINVAL; - } if (of_property_read_u32(node, "cpdma_channels", &prop)) { pr_err("Missing cpdma_channels property in the DT.\n"); @@ -1093,6 +1343,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->mac_control = prop; + if (!of_property_read_u32(node, "dual_emac", &prop)) + data->dual_emac = prop; + /* * Populate all the child nodes here... 
*/ @@ -1126,6 +1379,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (mac_addr) memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + if (data->dual_emac) { + if (of_property_read_u32(node, "dual_emac_res_vlan", + &prop)) { + pr_err("Missing dual_emac_res_vlan in DT.\n"); + slave_data->dual_emac_res_vlan = i+1; + pr_err("Using %d as Reserved VLAN for %d slave\n", + slave_data->dual_emac_res_vlan, i); + } else { + slave_data->dual_emac_res_vlan = prop; + } + } + i++; } @@ -1136,6 +1401,79 @@ error_ret: return ret; } +static int cpsw_probe_dual_emac(struct platform_device *pdev, + struct cpsw_priv *priv) +{ + struct cpsw_platform_data *data = &priv->data; + struct net_device *ndev; + struct cpsw_priv *priv_sl2; + int ret = 0, i; + + ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + if (!ndev) { + pr_err("cpsw: error allocating net_device\n"); + return -ENOMEM; + } + + priv_sl2 = netdev_priv(ndev); + spin_lock_init(&priv_sl2->lock); + priv_sl2->data = *data; + priv_sl2->pdev = pdev; + priv_sl2->ndev = ndev; + priv_sl2->dev = &ndev->dev; + priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv_sl2->rx_packet_max = max(rx_packet_max, 128); + + if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { + memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, + ETH_ALEN); + pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); + } else { + random_ether_addr(priv_sl2->mac_addr); + pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); + } + memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); + + priv_sl2->slaves = priv->slaves; + priv_sl2->clk = priv->clk; + + priv_sl2->cpsw_res = priv->cpsw_res; + priv_sl2->regs = priv->regs; + priv_sl2->host_port = priv->host_port; + priv_sl2->host_port_regs = priv->host_port_regs; + priv_sl2->wr_regs = priv->wr_regs; + priv_sl2->dma = priv->dma; + priv_sl2->txch = priv->txch; + priv_sl2->rxch = priv->rxch; + priv_sl2->ale = priv->ale; + priv_sl2->emac_port = 1; + priv->slaves[1].ndev = ndev; + priv_sl2->cpts = priv->cpts; + priv_sl2->version = priv->version; + + for (i = 0; i < priv->num_irqs; i++) { + priv_sl2->irqs_table[i] = priv->irqs_table[i]; + priv_sl2->num_irqs = priv->num_irqs; + } + + ndev->features |= NETIF_F_HW_VLAN_FILTER; + + ndev->netdev_ops = &cpsw_netdev_ops; + SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); + netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); + + /* register the network device */ + SET_NETDEV_DEV(ndev, &pdev->dev); + ret = register_netdev(ndev); + if (ret) { + pr_err("cpsw: error registering net device\n"); + free_netdev(ndev); + ret = -ENODEV; + } + + return ret; +} + static int cpsw_probe(struct platform_device *pdev) { struct cpsw_platform_data *data = pdev->dev.platform_data; @@ -1162,6 +1500,11 @@ static int cpsw_probe(struct platform_device *pdev) priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); + priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); + if (!ndev) { + pr_err("error allocating cpts\n"); + goto clean_ndev_ret; + } /* * This may be required here for child devices. 
@@ -1194,6 +1537,9 @@ static int cpsw_probe(struct platform_device *pdev) for (i = 0; i < data->slaves; i++) priv->slaves[i].slave_num = i; + priv->slaves[0].ndev = ndev; + priv->emac_port = 0; + priv->clk = clk_get(&pdev->dev, "fck"); if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "fck is not found\n"); @@ -1248,7 +1594,7 @@ static int cpsw_probe(struct platform_device *pdev) switch (priv->version) { case CPSW_VERSION_1: priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -1259,7 +1605,7 @@ static int cpsw_probe(struct platform_device *pdev) break; case CPSW_VERSION_2: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; @@ -1346,7 +1692,7 @@ static int cpsw_probe(struct platform_device *pdev) k++; } - ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */ + ndev->features |= NETIF_F_HW_VLAN_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); @@ -1361,13 +1707,21 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_irq_ret; } - if (cpts_register(&pdev->dev, &priv->cpts, + if (cpts_register(&pdev->dev, priv->cpts, data->cpts_clock_mult, data->cpts_clock_shift)) dev_err(priv->dev, "error registering cpts device\n"); cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", priv->cpsw_res->start, ndev->irq); + if (priv->data.dual_emac) { + ret = cpsw_probe_dual_emac(pdev, priv); + if (ret) { + cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); + goto clean_irq_ret; + } + } + return 0; clean_irq_ret: @@ -1406,7 +1760,7 @@ static int cpsw_remove(struct platform_device *pdev) pr_info("removing device"); platform_set_drvdata(pdev, NULL); - cpts_unregister(&priv->cpts); + cpts_unregister(priv->cpts); free_irq(ndev->irq, priv); cpsw_ale_destroy(priv->ale); cpdma_chan_destroy(priv->txch); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0e9ccc2cf91f..7fa60d6092ed 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) return idx; } -static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) +int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; @@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) continue; + if (cpsw_ale_get_vlan_id(ale_entry) != vid) + continue; cpsw_ale_get_addr(ale_entry, entry_addr); if (memcmp(entry_addr, addr, 6) == 0) return idx; @@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) return -ENOENT; } +int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + if 
(cpsw_ale_get_vlan_id(ale_entry) == vid) + return idx; + } + return -ENOENT; +} + static int cpsw_ale_match_free(struct cpsw_ale *ale) { u32 ale_entry[ALE_ENTRY_WORDS]; @@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask) return 0; } -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags) +static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, + int flags, u16 vid) +{ + if (flags & ALE_VLAN) { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR); + cpsw_ale_set_vlan_id(ale_entry, vid); + } else { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + } +} + +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + cpsw_ale_set_addr(ale_entry, addr); cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_port_num(ale_entry, port); - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) idx = cpsw_ale_match_free(ale); if (idx < 0) @@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags) return 0; } -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port) +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) return -ENOENT; @@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port) } int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int super, int mcast_state) + int flags, u16 vid, int mcast_state) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx, mask; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx >= 0) cpsw_ale_read(ale, idx, ale_entry); - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + cpsw_ale_set_addr(ale_entry, addr); - cpsw_ale_set_super(ale_entry, super); + cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); mask = cpsw_ale_get_port_mask(ale_entry); @@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, return 0; } -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask) +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
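With VLAN-aware ALE entries, an address lookup only matches when both the MAC and the VLAN id agree, and every caller passes vid 0 unless ALE_VLAN is set in its flags. The recurring `(flags & ALE_VLAN) ? vid : 0` expression amounts to this small helper, written out only for illustration:

	#include <linux/bitops.h>
	#include <linux/types.h>

	#define ALE_VLAN	BIT(3)	/* same value as in the cpsw_ale.h hunk below */

	/* vid to use for table matching, given the caller's flags */
	static inline u16 foo_ale_match_vid(int flags, u16 vid)
	{
		/* entries created without ALE_VLAN live in the table with vid 0 */
		return (flags & ALE_VLAN) ? vid : 0;
	}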
vid : 0); if (idx < 0) return -EINVAL; @@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask) return 0; } +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx >= 0) + cpsw_ale_read(ale, idx, ale_entry); + + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); + cpsw_ale_set_vlan_id(ale_entry, vid); + + cpsw_ale_set_vlan_untag_force(ale_entry, untag); + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_set_vlan_member_list(ale_entry, port); + + if (idx < 0) + idx = cpsw_ale_match_free(ale); + if (idx < 0) + idx = cpsw_ale_find_ageable(ale); + if (idx < 0) + return -ENOMEM; + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} + +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx < 0) + return -ENOENT; + + cpsw_ale_read(ale, idx, ale_entry); + + if (port_mask) + cpsw_ale_set_vlan_member_list(ale_entry, port_mask); + else + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} + struct ale_control_info { const char *name; int offset, port_offset; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 2bd09cbce522..30daa1265f0c 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -64,8 +64,14 @@ enum cpsw_ale_port_state { }; /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */ -#define ALE_SECURE 1 -#define ALE_BLOCKED 2 +#define ALE_SECURE BIT(0) +#define ALE_BLOCKED BIT(1) +#define ALE_SUPER BIT(2) +#define ALE_VLAN BIT(3) + +#define ALE_PORT_HOST BIT(0) +#define ALE_PORT_1 BIT(1) +#define ALE_PORT_2 BIT(2) #define ALE_MCAST_FWD 0 #define ALE_MCAST_BLOCK_LEARN_FWD 1 @@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale); int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags); -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port); +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int super, int mcast_state); -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask); + int flags, u16 vid, int mcast_state); +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid); +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast); +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 49956730cd8d..68c3418160ba 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -60,6 +60,9 @@ #define CPDMA_DESC_EOQ BIT(28) #define CPDMA_DESC_TD_COMPLETE BIT(27) #define CPDMA_DESC_PASS_CRC 
BIT(26) +#define CPDMA_DESC_TO_PORT_EN BIT(20) +#define CPDMA_TO_PORT_SHIFT 16 +#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) #define CPDMA_TEARDOWN_VALUE 0xfffffffc @@ -105,13 +108,13 @@ struct cpdma_ctlr { }; struct cpdma_chan { + struct cpdma_desc __iomem *head, *tail; + void __iomem *hdp, *cp, *rxfree; enum cpdma_state state; struct cpdma_ctlr *ctlr; int chan_num; spinlock_t lock; - struct cpdma_desc __iomem *head, *tail; int count; - void __iomem *hdp, *cp, *rxfree; u32 mask; cpdma_handler_fn handler; enum dma_data_direction dir; @@ -132,6 +135,14 @@ struct cpdma_chan { #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) +#define cpdma_desc_to_port(chan, mode, directed) \ + do { \ + if (!is_rx_chan(chan) && ((directed == 1) || \ + (directed == 2))) \ + mode |= (CPDMA_DESC_TO_PORT_EN | \ + (directed << CPDMA_TO_PORT_SHIFT)); \ + } while (0) + /* * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci * emac) have dedicated on-chip memory for these descriptors. Some other @@ -217,17 +228,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) } static struct cpdma_desc __iomem * -cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc) +cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) { unsigned long flags; int index; + int desc_start; + int desc_end; struct cpdma_desc __iomem *desc = NULL; spin_lock_irqsave(&pool->lock, flags); - index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0, - num_desc, 0); - if (index < pool->num_desc) { + if (is_rx) { + desc_start = 0; + desc_end = pool->num_desc/2; + } else { + desc_start = pool->num_desc/2; + desc_end = pool->num_desc; + } + + index = bitmap_find_next_zero_area(pool->bitmap, + desc_end, desc_start, num_desc, 0); + if (index < desc_end) { bitmap_set(pool->bitmap, index, num_desc); desc = pool->iomap + pool->desc_size * index; pool->used_desc++; @@ -439,10 +460,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) if (ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); - for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { - if (ctlr->channels[i]) - cpdma_chan_destroy(ctlr->channels[i]); - } + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) + cpdma_chan_destroy(ctlr->channels[i]); cpdma_desc_pool_destroy(ctlr->pool); spin_unlock_irqrestore(&ctlr->lock, flags); @@ -474,9 +493,9 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) return 0; } -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr) +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) { - dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0); + dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); } struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, @@ -652,7 +671,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan, } int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, gfp_t gfp_mask) + int len, int directed, gfp_t gfp_mask) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; @@ -668,7 +687,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, goto unlock_ret; } - desc = cpdma_desc_alloc(ctlr->pool, 1); + desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); if (!desc) { chan->stats.desc_alloc_fail++; ret = -ENOMEM; @@ -682,6 +701,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, buffer = dma_map_single(ctlr->dev, data, len, chan->dir); mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | 
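Two related cpdma changes land here: the descriptor pool is split so RX channels allocate from the lower half and TX channels from the upper half, and TX descriptors for a directed frame get the 'to port' bits set in the mode word. The mode-word part of cpdma_desc_to_port(), expressed as a function (the real macro additionally skips RX channels, which never carry the directed bits):

	/* Illustrative rewrite of the macro's bit manipulation. */
	#include <linux/bitops.h>
	#include <linux/types.h>

	#define FOO_DESC_TO_PORT_EN	BIT(20)		/* CPDMA_DESC_TO_PORT_EN */
	#define FOO_TO_PORT_SHIFT	16		/* CPDMA_TO_PORT_SHIFT */

	/* For a TX descriptor, direct the frame to slave port 1 or 2. */
	static inline u32 foo_desc_set_port(u32 mode, int directed)
	{
		if (directed == 1 || directed == 2)
			mode |= FOO_DESC_TO_PORT_EN |
				(directed << FOO_TO_PORT_SHIFT);
		return mode;
	}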
CPDMA_DESC_EOP; + cpdma_desc_to_port(chan, mode, directed); desc_write(desc, hw_next, 0); desc_write(desc, hw_buffer, buffer); @@ -704,6 +724,29 @@ unlock_ret: } EXPORT_SYMBOL_GPL(cpdma_chan_submit); +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) +{ + unsigned long flags; + int index; + bool ret; + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc_pool *pool = ctlr->pool; + + spin_lock_irqsave(&pool->lock, flags); + + index = bitmap_find_next_zero_area(pool->bitmap, + pool->num_desc, pool->num_desc/2, 1, 0); + + if (index < pool->num_desc) + ret = true; + else + ret = false; + + spin_unlock_irqrestore(&pool->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); + static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, int outlen, int status) @@ -749,7 +792,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) status = -EBUSY; goto unlock_ret; } - status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE); + status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | + CPDMA_DESC_PORT_MASK); chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); chan_write(chan, cp, desc_dma); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index afa19a0c0d81..d9bcc6032fdc 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -24,6 +24,13 @@ #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) #define chan_linear(chan) __chan_linear((chan)->chan_num) +#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) + +#define CPDMA_EOI_RX_THRESH 0x0 +#define CPDMA_EOI_RX 0x1 +#define CPDMA_EOI_TX 0x2 +#define CPDMA_EOI_MISC 0x3 + struct cpdma_params { struct device *dev; void __iomem *dmaregs; @@ -82,12 +89,13 @@ int cpdma_chan_dump(struct cpdma_chan *chan); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats); int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, gfp_t gfp_mask); + int len, int directed, gfp_t gfp_mask); int cpdma_chan_process(struct cpdma_chan *chan, int quota); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr); +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan); enum cpdma_control { CPDMA_CMD_IDLE, /* write-only */ diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2a3e2c56bc60..52c05366599a 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_TX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_NUM_DESC (128) -#define EMAC_DEF_TX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ @@ -342,7 +341,6 @@ struct emac_priv { u32 mac_hash2; u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; - atomic_t cur_tx; const char *phy_id; #ifdef CONFIG_OF struct device_node *phy_node; @@ -480,8 +478,8 @@ static void emac_dump_regs(struct emac_priv *priv) static void emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - 
strcpy(info->driver, emac_version_string); - strcpy(info->version, EMAC_MODULE_VERSION); + strlcpy(info->driver, emac_version_string, sizeof(info->driver)); + strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version)); } /** @@ -1039,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status) recycle: ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); + skb_tailroom(skb), 0, GFP_KERNEL); WARN_ON(ret == -ENOMEM); if (unlikely(ret < 0)) @@ -1050,10 +1048,10 @@ static void emac_tx_handler(void *token, int len, int status) { struct sk_buff *skb = token; struct net_device *ndev = skb->dev; - struct emac_priv *priv = netdev_priv(ndev); - - atomic_dec(&priv->cur_tx); + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ if (unlikely(netif_queue_stopped(ndev))) netif_start_queue(ndev); ndev->stats.tx_packets++; @@ -1094,14 +1092,17 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, - GFP_KERNEL); + 0, GFP_KERNEL); if (unlikely(ret_code != 0)) { if (netif_msg_tx_err(priv) && net_ratelimit()) dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); goto fail_tx; } - if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC) + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. + */ + if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) netif_stop_queue(ndev); return NETDEV_TX_OK; @@ -1264,7 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) /* Store mac addr in priv and rx channel and set it in EMAC hw */ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); - ndev->addr_assign_type &= ~NET_ADDR_RANDOM; /* MAC address is configured only after the interface is enabled. */ if (netif_running(ndev)) { @@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev) break; ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); + skb_tailroom(skb), 0, GFP_KERNEL); if (WARN_ON(ret < 0)) break; } @@ -1600,7 +1600,7 @@ static int emac_dev_open(struct net_device *ndev) if (priv->phy_id && *priv->phy_id) { priv->phydev = phy_connect(ndev, priv->phy_id, - &emac_adjust_link, 0, + &emac_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phydev)) { diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index cca25509b039..d04a622b08d4 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -320,10 +320,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) int ret, addr; data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) { - dev_err(dev, "failed to alloc device data\n"); + if (!data) return -ENOMEM; - } data->bus = mdiobus_alloc(); if (!data->bus) { diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 96070e9b50dc..36435499814b 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -2195,7 +2195,6 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p) /* ISSUE: Note that "dev_addr" is now a pointer. 
*/ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - dev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index e321d0b6fc88..445c0595c997 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1226,8 +1226,8 @@ int gelic_net_open(struct net_device *netdev) void gelic_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { - strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); - strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static int gelic_ether_get_settings(struct net_device *netdev, diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c index 9c288cd7d171..ffe519382e11 100644 --- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c +++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c @@ -72,11 +72,13 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev, card = netdev_priv(netdev); /* clear and fill out info */ - memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); - strncpy(drvinfo->driver, spider_net_driver_name, 32); - strncpy(drvinfo->version, VERSION, 32); - strcpy(drvinfo->fw_version, "no information"); - strncpy(drvinfo->bus_info, pci_name(card->pdev), 32); + strlcpy(drvinfo->driver, spider_net_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "no information", + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(card->pdev), + sizeof(drvinfo->bus_info)); } static void diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 9819349eaa1e..fe256094db35 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -633,9 +633,8 @@ static int tc_mii_probe(struct net_device *dev) /* attach the mac to the phy */ phydev = phy_connect(dev, dev_name(&phydev->dev), - &tc_handle_link_change, 0, - lp->chiptype == TC35815_TX4939 ? - PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); + &tc_handle_link_change, + lp->chiptype == TC35815_TX4939 ? 
PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); @@ -856,7 +855,6 @@ static int tc35815_init_one(struct pci_dev *pdev, if (rc) goto err_out; - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n", dev->name, chip_info[ent->driver_data].name, @@ -1976,9 +1974,10 @@ tc35815_set_multicast_list(struct net_device *dev) static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tc35815_local *lp = netdev_priv(dev); - strcpy(info->driver, MODNAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(lp->pci_dev)); + + strlcpy(info->driver, MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info)); } static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 78ace59efd29..185c721c52d7 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -417,6 +417,12 @@ enum chip_cmd_bits { Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, }; +struct rhine_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + struct rhine_private { /* Bit mask for configured VLAN ids */ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -458,6 +464,8 @@ struct rhine_private { unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ unsigned int cur_tx, dirty_tx; unsigned int rx_buf_sz; /* Based on MTU+slack. */ + struct rhine_stats rx_stats; + struct rhine_stats tx_stats; u8 wolopts; u8 tx_thresh, rx_thresh; @@ -495,7 +503,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance); static void rhine_tx(struct net_device *dev); static int rhine_rx(struct net_device *dev, int limit); static void rhine_set_rx_mode(struct net_device *dev); -static struct net_device_stats *rhine_get_stats(struct net_device *dev); +static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); @@ -842,7 +851,7 @@ static const struct net_device_ops rhine_netdev_ops = { .ndo_open = rhine_open, .ndo_stop = rhine_close, .ndo_start_xmit = rhine_start_tx, - .ndo_get_stats = rhine_get_stats, + .ndo_get_stats64 = rhine_get_stats64, .ndo_set_rx_mode = rhine_set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, @@ -990,7 +999,6 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev_info(dev, "Using random MAC address: %pM\n", dev->dev_addr); } - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* For Rhine-I/II, phy_id is loaded from EEPROM */ if (!phy_id) @@ -1791,8 +1799,11 @@ static void rhine_tx(struct net_device *dev) dev->stats.collisions += txstatus & 0x0F; netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", (txstatus >> 3) & 0xF, txstatus & 0xF); - dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; - dev->stats.tx_packets++; + + u64_stats_update_begin(&rp->tx_stats.syncp); + rp->tx_stats.bytes += rp->tx_skbuff[entry]->len; + rp->tx_stats.packets++; + u64_stats_update_end(&rp->tx_stats.syncp); } /* Free the original skb. 
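The phy_connect() calls touched throughout this series (tc35815 above, ixp4xx and ax88172a later, plus the phy_device.c prototypes) drop the dev_flags argument; per the phy_device.c hunk, the core now passes phydev->dev_flags to phy_attach_direct() itself. A minimal sketch with assumed foo_*() names:

#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/err.h>

/* Illustrative link-change callback; a real driver would update MAC
 * speed/duplex settings here. */
static void foo_adjust_link(struct net_device *ndev)
{
}

static int foo_connect_phy(struct net_device *ndev, const char *bus_id)
{
	struct phy_device *phydev;

	/* No flags argument any more; only the interface mode is passed. */
	phydev = phy_connect(ndev, bus_id, &foo_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	return IS_ERR(phydev) ? PTR_ERR(phydev) : 0;
}
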
*/ if (rp->tx_skbuff_dma[entry]) { @@ -1924,8 +1935,11 @@ static int rhine_rx(struct net_device *dev, int limit) if (unlikely(desc_length & DescTag)) __vlan_hwaccel_put_tag(skb, vlan_tci); netif_receive_skb(skb); - dev->stats.rx_bytes += pkt_len; - dev->stats.rx_packets++; + + u64_stats_update_begin(&rp->rx_stats.syncp); + rp->rx_stats.bytes += pkt_len; + rp->rx_stats.packets++; + u64_stats_update_end(&rp->rx_stats.syncp); } entry = (++rp->cur_rx) % RX_RING_SIZE; rp->rx_head_desc = &rp->rx_ring[entry]; @@ -2016,15 +2030,31 @@ out_unlock: mutex_unlock(&rp->task_lock); } -static struct net_device_stats *rhine_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 * +rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rhine_private *rp = netdev_priv(dev); + unsigned int start; spin_lock_bh(&rp->lock); rhine_update_rx_crc_and_missed_errord(rp); spin_unlock_bh(&rp->lock); - return &dev->stats; + netdev_stats_to_stats64(stats, &dev->stats); + + do { + start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); + stats->rx_packets = rp->rx_stats.packets; + stats->rx_bytes = rp->rx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); + stats->tx_packets = rp->tx_stats.packets; + stats->tx_bytes = rp->tx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); + + return stats; } static void rhine_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 352383890326..545043cc4c0b 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -570,7 +570,6 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(sock_addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); - ndev->addr_assign_type &= ~NET_ADDR_RANDOM; w5100_write_macaddr(priv); return 0; } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 9d1d986f8d40..7cbd0e6fc6f3 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -490,7 +490,6 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(sock_addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); - ndev->addr_assign_type &= ~NET_ADDR_RANDOM; w5300_write_macaddr(priv); return 0; } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index aad909d793d7..9fc2ada4c3c2 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -238,11 +238,9 @@ static int temac_dma_bd_init(struct net_device *ndev) int i; lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL); - if (!lp->rx_skb) { - dev_err(&ndev->dev, - "can't allocate memory for DMA RX buffer\n"); + if (!lp->rx_skb) goto out; - } + /* allocate the tx and rx ring buffer descriptors. */ /* returns a virtual address and a physical address. 
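The via-rhine conversion to ndo_get_stats64 above pairs u64_stats_sync updates in the TX/RX completion paths with a retry loop in the stats callback. A self-contained sketch of both halves, with illustrative "foo" names:

#include <linux/u64_stats_sync.h>
#include <linux/types.h>

/* Per-direction counter block mirroring rhine_stats above. */
struct foo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: called from the single TX or RX completion path, so only
 * the seqcount is needed, no lock. */
static void foo_stats_add(struct foo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side, as in rhine_get_stats64(): retry until the writer's
 * sequence count is stable so 32-bit readers see a consistent pair. */
static void foo_stats_fetch(const struct foo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}
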
*/ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, @@ -319,18 +317,10 @@ out: * net_device_ops */ -static int temac_set_mac_address(struct net_device *ndev, void *address) +static void temac_do_set_mac_address(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); - if (address) - memcpy(ndev->dev_addr, address, ETH_ALEN); - - if (!is_valid_ether_addr(ndev->dev_addr)) - eth_hw_addr_random(ndev); - else - ndev->addr_assign_type &= ~NET_ADDR_RANDOM; - /* set up unicast MAC address filter set its mac address */ mutex_lock(&lp->indirect_mutex); temac_indirect_out32(lp, XTE_UAW0_OFFSET, @@ -344,15 +334,26 @@ static int temac_set_mac_address(struct net_device *ndev, void *address) (ndev->dev_addr[4] & 0x000000ff) | (ndev->dev_addr[5] << 8)); mutex_unlock(&lp->indirect_mutex); +} +static int temac_init_mac_address(struct net_device *ndev, void *address) +{ + memcpy(ndev->dev_addr, address, ETH_ALEN); + if (!is_valid_ether_addr(ndev->dev_addr)) + eth_hw_addr_random(ndev); + temac_do_set_mac_address(ndev); return 0; } -static int netdev_set_mac_address(struct net_device *ndev, void *p) +static int temac_set_mac_address(struct net_device *ndev, void *p) { struct sockaddr *addr = p; - return temac_set_mac_address(ndev, addr->sa_data); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + temac_do_set_mac_address(ndev); + return 0; } static void temac_set_multicast_list(struct net_device *ndev) @@ -579,7 +580,7 @@ static void temac_device_reset(struct net_device *ndev) temac_setoptions(ndev, lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN)); - temac_set_mac_address(ndev, NULL); + temac_do_set_mac_address(ndev); /* Set address filter table */ temac_set_multicast_list(ndev); @@ -938,7 +939,7 @@ static const struct net_device_ops temac_netdev_ops = { .ndo_open = temac_open, .ndo_stop = temac_stop, .ndo_start_xmit = temac_start_xmit, - .ndo_set_mac_address = netdev_set_mac_address, + .ndo_set_mac_address = temac_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = temac_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1106,7 +1107,7 @@ static int temac_of_probe(struct platform_device *op) rc = -ENODEV; goto err_iounmap_2; } - temac_set_mac_address(ndev, (void *)addr); + temac_init_mac_address(ndev, (void *)addr); rc = temac_mdio_setup(lp, op->dev.of_node); if (rc) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 6f47100e58d7..278c9db3b5b8 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1124,9 +1124,8 @@ static int axienet_ethtools_set_settings(struct net_device *ndev, static void axienet_ethtools_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) { - memset(ed, 0, sizeof(struct ethtool_drvinfo)); - strcpy(ed->driver, DRIVER_NAME); - strcpy(ed->version, DRIVER_VERSION); + strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); + strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); ed->regdump_len = sizeof(u32) * AXIENET_REGS_N; } diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 94a1f94f74b8..98e09d0d3ce2 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1412,7 +1412,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver)); - 
sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); + snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", + dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index d3ebb73277be..6958a5e87703 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -977,11 +977,12 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct port *port = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u", port->firmware[0], port->firmware[1], port->firmware[2], port->firmware[3]); - strcpy(info->bus_info, "internal"); + strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); } static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -1450,7 +1451,7 @@ static int eth_init_one(struct platform_device *pdev) snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, mdio_bus->id, plat->phy); - port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0, + port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(port->phydev)) { err = PTR_ERR(port->phydev); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index c2e5497397d5..02de6c891670 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -586,7 +586,8 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi static int __init bpq_init_driver(void) { #ifdef CONFIG_PROC_FS - if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) { + if (!proc_create("bpqether", S_IRUGO, init_net.proc_net, + &bpq_info_fops)) { printk(KERN_ERR "bpq: cannot create /proc/net/bpqether entry.\n"); return -ENOENT; @@ -610,7 +611,7 @@ static void __exit bpq_cleanup_driver(void) unregister_netdevice_notifier(&bpq_dev_notifier); - proc_net_remove(&init_net, "bpqether"); + remove_proc_entry("bpqether", init_net.proc_net); rtnl_lock(); while (!list_empty(&bpq_devices)) { diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index ce555d9ac02c..6636022a1027 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -463,13 +463,8 @@ static int __init setup_adapter(int card_base, int type, int n) /* Initialize what is necessary for write_scc and write_scc_data */ info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); - if (!info) { - printk(KERN_ERR "dmascc: " - "could not allocate memory for %s at %#3x\n", - hw[type].name, card_base); + if (!info) goto out; - } - info->dev[0] = alloc_netdev(0, "", dev_setup); if (!info->dev[0]) { diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 1b4a47bd32b7..bc1d52170389 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -2118,7 +2118,7 @@ static int __init scc_init_driver (void) } rtnl_unlock(); - proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops); + proc_create("z8530drv", 0, init_net.proc_net, &scc_net_seq_fops); return 0; } @@ -2173,7 +2173,7 @@ static void __exit scc_cleanup_driver(void) if (Vector_Latch) release_region(Vector_Latch, 1); - proc_net_remove(&init_net, "z8530drv"); + remove_proc_entry("z8530drv", init_net.proc_net); } MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>"); diff --git a/drivers/net/hamradio/yam.c 
b/drivers/net/hamradio/yam.c index c6645f1017af..4cf8f1017aad 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1167,7 +1167,7 @@ static int __init yam_init_driver(void) yam_timer.expires = jiffies + HZ / 100; add_timer(&yam_timer); - proc_net_fops_create(&init_net, "yam", S_IRUGO, &yam_info_fops); + proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops); return 0; error: while (--i >= 0) { @@ -1199,7 +1199,7 @@ static void __exit yam_cleanup_driver(void) kfree(p); } - proc_net_remove(&init_net, "yam"); + remove_proc_entry("yam", init_net.proc_net); } /* --------------------------------------------------------------------- */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 8264f0ef7692..d5202a4b0877 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -304,9 +304,9 @@ int netvsc_recv_callback(struct hv_device *device_obj, static void netvsc_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { - strcpy(info->driver, KBUILD_MODNAME); - strcpy(info->version, HV_DRV_VERSION); - strcpy(info->fw_version, "N/A"); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, HV_DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); } static int netvsc_change_mtu(struct net_device *ndev, int mtu) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index a4a62e170ec0..fc1687ea4a42 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -751,16 +751,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) return 0; } -static int at86rf230_suspend(struct spi_device *spi, pm_message_t message) -{ - return 0; -} - -static int at86rf230_resume(struct spi_device *spi) -{ - return 0; -} - static int at86rf230_fill_data(struct spi_device *spi) { struct at86rf230_local *lp = spi_get_drvdata(spi); @@ -948,8 +938,6 @@ static struct spi_driver at86rf230_driver = { }, .probe = at86rf230_probe, .remove = at86rf230_remove, - .suspend = at86rf230_suspend, - .resume = at86rf230_resume, }; module_spi_driver(at86rf230_driver); diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c index 1e9cb0bbf62c..8f1c25676d44 100644 --- a/drivers/net/ieee802154/fakehard.c +++ b/drivers/net/ieee802154/fakehard.c @@ -372,7 +372,6 @@ static int ieee802154fake_probe(struct platform_device *pdev) memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef", dev->addr_len); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* * For now we'd like to emulate 2.4 GHz-only device, diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 344dceb1aaf9..82164381f778 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -90,7 +90,7 @@ static void ri_tasklet(unsigned long dev) u64_stats_update_end(&dp->tsync); rcu_read_lock(); - skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif); + skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif); if (!skb->dev) { rcu_read_unlock(); dev_kfree_skb(skb); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d3fb97d97cbc..defcd8a85744 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -29,6 +29,7 @@ #include <linux/if_vlan.h> #include <linux/if_link.h> #include <linux/if_macvlan.h> +#include <linux/hash.h> #include <net/rtnetlink.h> #include <net/xfrm.h> @@ -126,6 +127,21 @@ static int macvlan_broadcast_one(struct sk_buff *skb, return vlan->receive(skb); } +static u32 
macvlan_hash_mix(const struct macvlan_dev *vlan) +{ + return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT); +} + + +static unsigned int mc_hash(const struct macvlan_dev *vlan, + const unsigned char *addr) +{ + u32 val = __get_unaligned_cpu32(addr + 2); + + val ^= macvlan_hash_mix(vlan); + return hash_32(val, MACVLAN_MC_FILTER_BITS); +} + static void macvlan_broadcast(struct sk_buff *skb, const struct macvlan_port *port, struct net_device *src, @@ -137,6 +153,7 @@ static void macvlan_broadcast(struct sk_buff *skb, struct sk_buff *nskb; unsigned int i; int err; + unsigned int hash; if (skb->protocol == htons(ETH_P_PAUSE)) return; @@ -146,6 +163,9 @@ static void macvlan_broadcast(struct sk_buff *skb, if (vlan->dev == src || !(vlan->mode & mode)) continue; + hash = mc_hash(vlan, eth->h_dest); + if (!test_bit(hash, vlan->mc_filter)) + continue; nskb = skb_clone(skb, GFP_ATOMIC); err = macvlan_broadcast_one(nskb, vlan, eth, mode == MACVLAN_MODE_BRIDGE); @@ -375,7 +395,6 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) if (!(dev->flags & IFF_UP)) { /* Just copy in the new address */ - dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } else { /* Rehash and update the device filters */ @@ -406,6 +425,21 @@ static void macvlan_set_mac_lists(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ); + } else { + struct netdev_hw_addr *ha; + DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ); + + bitmap_zero(filter, MACVLAN_MC_FILTER_SZ); + netdev_for_each_mc_addr(ha, dev) { + __set_bit(mc_hash(vlan, ha->addr), filter); + } + + __set_bit(mc_hash(vlan, dev->broadcast), filter); + + bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ); + } dev_uc_sync(vlan->lowerdev, dev); dev_mc_sync(vlan->lowerdev, dev); } @@ -565,7 +599,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -static int macvlan_fdb_del(struct ndmsg *ndm, +static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr) { @@ -586,8 +620,8 @@ static int macvlan_fdb_del(struct ndmsg *ndm, static void macvlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - snprintf(drvinfo->driver, 32, "macvlan"); - snprintf(drvinfo->version, 32, "0.1"); + strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version)); } static int macvlan_ethtool_get_settings(struct net_device *dev, @@ -765,16 +799,22 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); } + err = netdev_upper_dev_link(lowerdev, dev); + if (err) + goto destroy_port; + port->count += 1; err = register_netdevice(dev); if (err < 0) - goto destroy_port; + goto upper_dev_unlink; list_add_tail(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; +upper_dev_unlink: + netdev_upper_dev_unlink(lowerdev, dev); destroy_port: port->count -= 1; if (!port->count) @@ -798,6 +838,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) list_del(&vlan->list); unregister_netdevice_queue(dev, head); + netdev_upper_dev_unlink(vlan->lowerdev, dev); } EXPORT_SYMBOL_GPL(macvlan_dellink); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0f0f9ce3a776..97243011d319 100644 --- a/drivers/net/macvtap.c +++ 
b/drivers/net/macvtap.c @@ -742,6 +742,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (zerocopy) { skb_shinfo(skb)->destructor_arg = m->msg_control; skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } if (vlan) macvlan_start_xmit(skb, vlan->dev); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 6989ebe2bc79..37add21a3d7d 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -269,12 +269,18 @@ static ssize_t show_remote_port(struct netconsole_target *nt, char *buf) static ssize_t show_local_ip(struct netconsole_target *nt, char *buf) { - return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip); + if (nt->np.ipv6) + return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.local_ip.in6); + else + return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip); } static ssize_t show_remote_ip(struct netconsole_target *nt, char *buf) { - return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip); + if (nt->np.ipv6) + return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.remote_ip.in6); + else + return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip); } static ssize_t show_local_mac(struct netconsole_target *nt, char *buf) @@ -410,7 +416,22 @@ static ssize_t store_local_ip(struct netconsole_target *nt, return -EINVAL; } - nt->np.local_ip = in_aton(buf); + if (strnchr(buf, count, ':')) { + const char *end; + if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) { + if (*end && *end != '\n') { + printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end); + return -EINVAL; + } + nt->np.ipv6 = true; + } else + return -EINVAL; + } else { + if (!nt->np.ipv6) { + nt->np.local_ip.ip = in_aton(buf); + } else + return -EINVAL; + } return strnlen(buf, count); } @@ -426,7 +447,22 @@ static ssize_t store_remote_ip(struct netconsole_target *nt, return -EINVAL; } - nt->np.remote_ip = in_aton(buf); + if (strnchr(buf, count, ':')) { + const char *end; + if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) { + if (*end && *end != '\n') { + printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end); + return -EINVAL; + } + nt->np.ipv6 = true; + } else + return -EINVAL; + } else { + if (!nt->np.ipv6) { + nt->np.remote_ip.ip = in_aton(buf); + } else + return -EINVAL; + } return strnlen(buf, count); } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index b983596abcbb..29934446436a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -5,15 +5,20 @@ * * Author: David J. Choi * - * Copyright (c) 2010 Micrel, Inc. + * Copyright (c) 2010-2013 Micrel, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. 
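The netconsole store_local_ip()/store_remote_ip() hunks above both use the same parse: a ':' in the input selects in6_pton() for IPv6, otherwise in_aton() handles dotted-quad IPv4. A standalone sketch of that decision; the function and parameter names are assumptions, not from the patch:

#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>

static int foo_parse_ip(const char *buf, int count,
			struct in6_addr *v6, __be32 *v4, bool *is_v6)
{
	const char *end;

	if (strnchr(buf, count, ':')) {
		/* A ':' can only appear in an IPv6 literal. */
		if (in6_pton(buf, count, v6->s6_addr, -1, &end) <= 0)
			return -EINVAL;
		if (*end && *end != '\n')
			return -EINVAL;
		*is_v6 = true;
	} else {
		*v4 = in_aton(buf);
		*is_v6 = false;
	}
	return 0;
}
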
* - * Support : ksz9021 1000/100/10 phy from Micrel - * ks8001, ks8737, ks8721, ks8041, ks8051 100/10 phy + * Support : Micrel Phys: + * Giga phys: ksz9021, ksz9031 + * 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041 + * ksz8021, ksz8031, ksz8051, + * ksz8081, ksz8091, + * ksz8061, + * Switch : ksz8873, ksz886x */ #include <linux/kernel.h> @@ -176,7 +181,7 @@ static struct phy_driver ksphy_driver[] = { }, { .phy_id = PHY_ID_KSZ8021, .phy_id_mask = 0x00ffffff, - .name = "Micrel KSZ8021", + .name = "Micrel KSZ8021 or KSZ8031", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, @@ -225,6 +230,30 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .driver = { .owner = THIS_MODULE,}, }, { + .phy_id = PHY_ID_KSZ8081, + .name = "Micrel KSZ8081 or KSZ8091", + .phy_id_mask = 0x00fffff0, + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { + .phy_id = PHY_ID_KSZ8061, + .name = "Micrel KSZ8061", + .phy_id_mask = 0x00fffff0, + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, .name = "Micrel KSZ9021 Gigabit PHY", @@ -238,6 +267,19 @@ static struct phy_driver ksphy_driver[] = { .config_intr = ksz9021_config_intr, .driver = { .owner = THIS_MODULE, }, }, { + .phy_id = PHY_ID_KSZ9031, + .phy_id_mask = 0x00fffff0, + .name = "Micrel KSZ9031 Gigabit PHY", + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause + | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = ksz9021_config_intr, + .driver = { .owner = THIS_MODULE, }, +}, { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = 0x00fffff0, .name = "Micrel KSZ8873MLL Switch", @@ -247,6 +289,16 @@ static struct phy_driver ksphy_driver[] = { .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, .driver = { .owner = THIS_MODULE, }, +}, { + .phy_id = PHY_ID_KSZ886X, + .phy_id_mask = 0x00fffff0, + .name = "Micrel KSZ886X Switch", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .driver = { .owner = THIS_MODULE, }, } }; static int __init ksphy_init(void) @@ -270,12 +322,16 @@ MODULE_LICENSE("GPL"); static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ9021, 0x000ffffe }, + { PHY_ID_KSZ9031, 0x00fffff0 }, { PHY_ID_KSZ8001, 0x00ffffff }, { PHY_ID_KS8737, 0x00fffff0 }, { PHY_ID_KSZ8021, 0x00ffffff }, { PHY_ID_KSZ8041, 0x00fffff0 }, { PHY_ID_KSZ8051, 0x00fffff0 }, + { PHY_ID_KSZ8061, 0x00fffff0 }, + { PHY_ID_KSZ8081, 0x00fffff0 }, { PHY_ID_KSZ8873MLL, 0x00fffff0 }, + { PHY_ID_KSZ886X, 0x00fffff0 }, { } }; diff --git a/drivers/net/phy/phy_device.c 
b/drivers/net/phy/phy_device.c index 8af46e88a181..9930f9999561 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -416,16 +416,15 @@ static void phy_prepare_link(struct phy_device *phydev, * @dev: the network device to connect * @phydev: the pointer to the phy device * @handler: callback function for state change notifications - * @flags: PHY device's dev_flags * @interface: PHY device's interface */ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, - void (*handler)(struct net_device *), u32 flags, + void (*handler)(struct net_device *), phy_interface_t interface) { int rc; - rc = phy_attach_direct(dev, phydev, flags, interface); + rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); if (rc) return rc; @@ -443,7 +442,6 @@ EXPORT_SYMBOL(phy_connect_direct); * @dev: the network device to connect * @bus_id: the id string of the PHY device to connect * @handler: callback function for state change notifications - * @flags: PHY device's dev_flags * @interface: PHY device's interface * * Description: Convenience function for connecting ethernet @@ -455,7 +453,7 @@ EXPORT_SYMBOL(phy_connect_direct); * the desired functionality. */ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id, - void (*handler)(struct net_device *), u32 flags, + void (*handler)(struct net_device *), phy_interface_t interface) { struct phy_device *phydev; @@ -471,7 +469,7 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id, } phydev = to_phy_device(d); - rc = phy_connect_direct(dev, phydev, handler, flags, interface); + rc = phy_connect_direct(dev, phydev, handler, interface); if (rc) return ERR_PTR(rc); @@ -576,14 +574,13 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, * phy_attach - attach a network device to a particular PHY device * @dev: network device to attach * @bus_id: Bus ID of PHY device to attach - * @flags: PHY device's dev_flags * @interface: PHY device's interface * * Description: Same as phy_attach_direct() except that a PHY bus_id * string is passed instead of a pointer to a struct phy_device. */ struct phy_device *phy_attach(struct net_device *dev, - const char *bus_id, u32 flags, phy_interface_t interface) + const char *bus_id, phy_interface_t interface) { struct bus_type *bus = &mdio_bus_type; struct phy_device *phydev; @@ -599,7 +596,7 @@ struct phy_device *phy_attach(struct net_device *dev, } phydev = to_phy_device(d); - rc = phy_attach_direct(dev, phydev, flags, interface); + rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); if (rc) return ERR_PTR(rc); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 72f93470ea35..8e7af8354342 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -23,6 +23,8 @@ #define RTL821x_INER_INIT 0x6400 #define RTL821x_INSR 0x13 +#define RTL8211E_INER_LINK_STAT 0x10 + MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); @@ -36,7 +38,7 @@ static int rtl821x_ack_interrupt(struct phy_device *phydev) return (err < 0) ? 
err : 0; } -static int rtl821x_config_intr(struct phy_device *phydev) +static int rtl8211b_config_intr(struct phy_device *phydev) { int err; @@ -49,28 +51,63 @@ static int rtl821x_config_intr(struct phy_device *phydev) return err; } +static int rtl8211e_config_intr(struct phy_device *phydev) +{ + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + err = phy_write(phydev, RTL821x_INER, + RTL8211E_INER_LINK_STAT); + else + err = phy_write(phydev, RTL821x_INER, 0); + + return err; +} + /* RTL8211B */ -static struct phy_driver rtl821x_driver = { +static struct phy_driver rtl8211b_driver = { .phy_id = 0x001cc912, - .name = "RTL821x Gigabit Ethernet", + .name = "RTL8211B Gigabit Ethernet", .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &rtl821x_ack_interrupt, - .config_intr = &rtl821x_config_intr, + .config_intr = &rtl8211b_config_intr, + .driver = { .owner = THIS_MODULE,}, +}; + +/* RTL8211E */ +static struct phy_driver rtl8211e_driver = { + .phy_id = 0x001cc915, + .name = "RTL8211E Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &genphy_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &rtl821x_ack_interrupt, + .config_intr = &rtl8211e_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, }; static int __init realtek_init(void) { - return phy_driver_register(&rtl821x_driver); + int ret; + + ret = phy_driver_register(&rtl8211b_driver); + if (ret < 0) + return -ENODEV; + return phy_driver_register(&rtl8211e_driver); } static void __exit realtek_exit(void) { - phy_driver_unregister(&rtl821x_driver); + phy_driver_unregister(&rtl8211b_driver); + phy_driver_unregister(&rtl8211e_driver); } module_init(realtek_init); @@ -78,6 +115,7 @@ module_exit(realtek_exit); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc912, 0x001fffff }, + { 0x001cc915, 0x001fffff }, { } }; diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 41eb8ffeb53d..5c87eef40bf9 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -275,10 +275,8 @@ static int ks8995_probe(struct spi_device *spi) pdata = spi->dev.platform_data; ks = kzalloc(sizeof(*ks), GFP_KERNEL); - if (!ks) { - dev_err(&spi->dev, "no memory for private data\n"); + if (!ks) return -ENOMEM; - } mutex_init(&ks->lock); ks->pdata = pdata; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 0b2706abe3e3..3db9131e9229 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1058,7 +1058,15 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) return stats64; } +static struct lock_class_key ppp_tx_busylock; +static int ppp_dev_init(struct net_device *dev) +{ + dev->qdisc_tx_busylock = &ppp_tx_busylock; + return 0; +} + static const struct net_device_ops ppp_netdev_ops = { + .ndo_init = ppp_dev_init, .ndo_start_xmit = ppp_start_xmit, .ndo_do_ioctl = ppp_net_ioctl, .ndo_get_stats64 = ppp_get_stats64, @@ -1805,8 +1813,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) /* the filter instructions are constructed assuming a four-byte PPP header on each packet */ if (ppp->pass_filter || ppp->active_filter) { - if (skb_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + if (skb_unclone(skb, GFP_ATOMIC)) goto err; 
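skb_unclone(), used in the ppp_generic hunk just above, wraps the old skb_cloned() + pskb_expand_head() idiom for making the header private before it is modified. A minimal caller sketch with an assumed foo name:

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int foo_make_header_writable(struct sk_buff *skb)
{
	/* Copies the header only if the skb is actually cloned. */
	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
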
*skb_push(skb, 2) = 0; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 20f31d0d1536..bb07ba94c3aa 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1134,7 +1134,7 @@ static __net_init int pppoe_init_net(struct net *net) rwlock_init(&pn->hash_lock); - pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops); + pde = proc_create("pppoe", S_IRUGO, net->proc_net, &pppoe_seq_fops); #ifdef CONFIG_PROC_FS if (!pde) return -ENOMEM; @@ -1145,7 +1145,7 @@ static __net_init int pppoe_init_net(struct net *net) static __net_exit void pppoe_exit_net(struct net *net) { - proc_net_remove(net, "pppoe"); + remove_proc_entry("pppoe", net->proc_net); } static struct pernet_operations pppoe_net_ops = { diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index d8b9b1e8ee02..f433b594388e 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -410,10 +410,10 @@ static void rionet_get_drvinfo(struct net_device *ndev, { struct rionet_private *rnet = netdev_priv(ndev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "n/a"); - strcpy(info->bus_info, rnet->mport->name); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "n/a", sizeof(info->fw_version)); + strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info)); } static u32 rionet_get_msglevel(struct net_device *ndev) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ad86660fb8f9..05c5efe84591 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -28,6 +28,7 @@ #include <net/genetlink.h> #include <net/netlink.h> #include <net/sch_generic.h> +#include <generated/utsrelease.h> #include <linux/if_team.h> #define DRV_NAME "team" @@ -507,6 +508,7 @@ static bool team_is_mode_set(struct team *team) static void team_set_no_mode(struct team *team) { + team->user_carrier_enabled = false; team->mode = &__team_no_mode; } @@ -1054,10 +1056,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev) } } - err = netdev_set_master(port_dev, dev); + err = netdev_master_upper_dev_link(port_dev, dev); if (err) { - netdev_err(dev, "Device %s failed to set master\n", portname); - goto err_set_master; + netdev_err(dev, "Device %s failed to set upper link\n", + portname); + goto err_set_upper_link; } err = netdev_rx_handler_register(port_dev, team_handle_frame, @@ -1090,9 +1093,9 @@ err_option_port_add: netdev_rx_handler_unregister(port_dev); err_handler_register: - netdev_set_master(port_dev, NULL); + netdev_upper_dev_unlink(port_dev, dev); -err_set_master: +err_set_upper_link: team_port_disable_netpoll(port); err_enable_netpoll: @@ -1129,18 +1132,20 @@ static int team_port_del(struct team *team, struct net_device *port_dev) return -ENOENT; } - __team_option_inst_mark_removed_port(team, port); - __team_options_change_check(team); - __team_option_inst_del_port(team, port); - __team_port_change_port_removed(port); team_port_disable(team, port); list_del_rcu(&port->list); netdev_rx_handler_unregister(port_dev); - netdev_set_master(port_dev, NULL); + netdev_upper_dev_unlink(port_dev, dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); + + __team_option_inst_mark_removed_port(team, port); + __team_options_change_check(team); + __team_option_inst_del_port(team, port); + __team_port_change_port_removed(port); + team_port_set_orig_dev_addr(port); 
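The hamradio and pppoe hunks in this patch replace the proc_net_fops_create()/proc_net_remove() wrappers with plain proc_create()/remove_proc_entry() against net->proc_net. A per-net sketch; the "foo" name and foo_seq_fops are assumed placeholders, with the file_operations expected to be filled in elsewhere:

#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <net/net_namespace.h>

static const struct file_operations foo_seq_fops; /* assumed, defined elsewhere */

static int __net_init foo_proc_init(struct net *net)
{
	if (!proc_create("foo", S_IRUGO, net->proc_net, &foo_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit foo_proc_exit(struct net *net)
{
	remove_proc_entry("foo", net->proc_net);
}
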
dev_set_mtu(port_dev, port->orig.mtu); synchronize_rcu(); @@ -1399,13 +1404,11 @@ static void team_destructor(struct net_device *dev) static int team_open(struct net_device *dev) { - netif_carrier_on(dev); return 0; } static int team_close(struct net_device *dev) { - netif_carrier_off(dev); return 0; } @@ -1501,7 +1504,6 @@ static int team_set_mac_address(struct net_device *dev, void *p) if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - dev->addr_assign_type &= ~NET_ADDR_RANDOM; rcu_read_lock(); list_for_each_entry_rcu(port, &team->port_list, list) if (team->ops.port_change_dev_addr) @@ -1707,6 +1709,19 @@ static netdev_features_t team_fix_features(struct net_device *dev, return features; } +static int team_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct team *team = netdev_priv(dev); + + team->user_carrier_enabled = true; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + return 0; +} + static const struct net_device_ops team_netdev_ops = { .ndo_init = team_init, .ndo_uninit = team_uninit, @@ -1729,8 +1744,24 @@ static const struct net_device_ops team_netdev_ops = { .ndo_add_slave = team_add_slave, .ndo_del_slave = team_del_slave, .ndo_fix_features = team_fix_features, + .ndo_change_carrier = team_change_carrier, }; +/*********************** + * ethtool interface + ***********************/ + +static void team_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static const struct ethtool_ops team_ethtool_ops = { + .get_drvinfo = team_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, +}; /*********************** * rt netlink interface @@ -1746,7 +1777,6 @@ static void team_setup_by_port(struct net_device *dev, dev->mtu = port_dev->mtu; memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len); - dev->addr_assign_type &= ~NET_ADDR_RANDOM; } static int team_dev_type_check_change(struct net_device *dev, @@ -1780,6 +1810,7 @@ static void team_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &team_netdev_ops; + dev->ethtool_ops = &team_ethtool_ops; dev->destructor = team_destructor; dev->tx_queue_len = 0; dev->flags |= IFF_MULTICAST; @@ -1941,30 +1972,6 @@ static void team_nl_team_put(struct team *team) dev_put(team->dev); } -static int team_nl_send_generic(struct genl_info *info, struct team *team, - int (*fill_func)(struct sk_buff *skb, - struct genl_info *info, - int flags, struct team *team)) -{ - struct sk_buff *skb; - int err; - - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - err = fill_func(skb, info, NLM_F_ACK, team); - if (err < 0) - goto err_fill; - - err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid); - return err; - -err_fill: - nlmsg_free(skb); - return err; -} - typedef int team_nl_send_func_t(struct sk_buff *skb, struct team *team, u32 portid); @@ -2309,16 +2316,57 @@ team_put: return err; } -static int team_nl_fill_port_list_get(struct sk_buff *skb, - u32 portid, u32 seq, int flags, - struct team *team, - bool fillall) +static int team_nl_fill_one_port_get(struct sk_buff *skb, + struct team_port *port) +{ + struct nlattr *port_item; + + port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); + if (!port_item) + goto nest_cancel; 
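team_nl_fill_one_port_get() above follows the standard netlink nesting shape: open a nest per item, and roll the partial item back with nla_nest_cancel() when the skb runs out of room so the caller can restart in a new message. A standalone sketch; the FOO_ATTR_* values are illustrative:

#include <net/netlink.h>
#include <linux/errno.h>

enum {
	FOO_ATTR_ITEM = 1,
	FOO_ATTR_IFINDEX,
};

static int foo_nl_fill_one(struct sk_buff *skb, u32 ifindex)
{
	struct nlattr *item;

	item = nla_nest_start(skb, FOO_ATTR_ITEM);
	if (!item)
		return -EMSGSIZE;
	if (nla_put_u32(skb, FOO_ATTR_IFINDEX, ifindex))
		goto nest_cancel;
	nla_nest_end(skb, item);
	return 0;

nest_cancel:
	/* Drop the half-written item so the message stays well-formed. */
	nla_nest_cancel(skb, item);
	return -EMSGSIZE;
}
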
+ if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex)) + goto nest_cancel; + if (port->changed) { + if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED)) + goto nest_cancel; + port->changed = false; + } + if ((port->removed && + nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) || + (port->state.linkup && + nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) || + nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) || + nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex)) + goto nest_cancel; + nla_nest_end(skb, port_item); + return 0; + +nest_cancel: + nla_nest_cancel(skb, port_item); + return -EMSGSIZE; +} + +static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, + int flags, team_nl_send_func_t *send_func, + struct team_port *one_port) { struct nlattr *port_list; + struct nlmsghdr *nlh; void *hdr; struct team_port *port; + int err; + struct sk_buff *skb = NULL; + bool incomplete; + int i; + + port = list_first_entry(&team->port_list, struct team_port, list); + +start_again: + err = __send_and_alloc_skb(&skb, team, portid, send_func); + if (err) + return err; - hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags, + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_PORT_LIST_GET); if (!hdr) return -EMSGSIZE; @@ -2329,47 +2377,54 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb, if (!port_list) goto nla_put_failure; - list_for_each_entry(port, &team->port_list, list) { - struct nlattr *port_item; + i = 0; + incomplete = false; - /* Include only changed ports if fill all mode is not on */ - if (!fillall && !port->changed) - continue; - port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); - if (!port_item) - goto nla_put_failure; - if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex)) - goto nla_put_failure; - if (port->changed) { - if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED)) - goto nla_put_failure; - port->changed = false; + /* If one port is selected, called wants to send port list containing + * only this port. 
Otherwise go through all listed ports and send all + */ + if (one_port) { + err = team_nl_fill_one_port_get(skb, one_port); + if (err) + goto errout; + } else { + list_for_each_entry(port, &team->port_list, list) { + err = team_nl_fill_one_port_get(skb, port); + if (err) { + if (err == -EMSGSIZE) { + if (!i) + goto errout; + incomplete = true; + break; + } + goto errout; + } + i++; } - if ((port->removed && - nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) || - (port->state.linkup && - nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) || - nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) || - nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex)) - goto nla_put_failure; - nla_nest_end(skb, port_item); } nla_nest_end(skb, port_list); - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + if (incomplete) + goto start_again; + +send_done: + nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); + if (!nlh) { + err = __send_and_alloc_skb(&skb, team, portid, send_func); + if (err) + goto errout; + goto send_done; + } + + return send_func(skb, team, portid); nla_put_failure: + err = -EMSGSIZE; +errout: genlmsg_cancel(skb, hdr); - return -EMSGSIZE; -} - -static int team_nl_fill_port_list_get_all(struct sk_buff *skb, - struct genl_info *info, int flags, - struct team *team) -{ - return team_nl_fill_port_list_get(skb, info->snd_portid, - info->snd_seq, NLM_F_ACK, - team, true); + nlmsg_free(skb); + return err; } static int team_nl_cmd_port_list_get(struct sk_buff *skb, @@ -2382,7 +2437,8 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, if (!team) return -EINVAL; - err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all); + err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq, + NLM_F_ACK, team_nl_send_unicast, NULL); team_nl_team_put(team); @@ -2433,27 +2489,11 @@ static int team_nl_send_event_options_get(struct team *team, sel_opt_inst_list); } -static int team_nl_send_event_port_list_get(struct team *team) +static int team_nl_send_event_port_get(struct team *team, + struct team_port *port) { - struct sk_buff *skb; - int err; - struct net *net = dev_net(team->dev); - - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false); - if (err < 0) - goto err_fill; - - err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, - GFP_KERNEL); - return err; - -err_fill: - nlmsg_free(skb); - return err; + return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast, + port); } static int team_nl_init(void) @@ -2526,28 +2566,53 @@ static void __team_port_change_send(struct team_port *port, bool linkup) port->state.duplex = 0; send_event: - err = team_nl_send_event_port_list_get(port->team); + err = team_nl_send_event_port_get(port->team, port); if (err && err != -ESRCH) netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n", port->dev->name, err); } +static void __team_carrier_check(struct team *team) +{ + struct team_port *port; + bool team_linkup; + + if (team->user_carrier_enabled) + return; + + team_linkup = false; + list_for_each_entry(port, &team->port_list, list) { + if (port->linkup) { + team_linkup = true; + break; + } + } + + if (team_linkup) + netif_carrier_on(team->dev); + else + netif_carrier_off(team->dev); +} + static void __team_port_change_check(struct team_port *port, bool linkup) { if (port->state.linkup != linkup) __team_port_change_send(port, linkup); + 
__team_carrier_check(port->team); } static void __team_port_change_port_added(struct team_port *port, bool linkup) { __team_port_change_send(port, linkup); + __team_carrier_check(port->team); } static void __team_port_change_port_removed(struct team_port *port) { port->removed = true; __team_port_change_send(port, false); + __team_carrier_check(port->team); } static void team_port_change_check(struct team_port *port, bool linkup) diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c index 6262b4defd93..40fd3381b693 100644 --- a/drivers/net/team/team_mode_activebackup.c +++ b/drivers/net/team/team_mode_activebackup.c @@ -19,6 +19,7 @@ struct ab_priv { struct team_port __rcu *active_port; + struct team_option_inst_info *ap_opt_inst_info; }; static struct ab_priv *ab_priv(struct team *team) @@ -54,8 +55,17 @@ drop: static void ab_port_leave(struct team *team, struct team_port *port) { - if (ab_priv(team)->active_port == port) + if (ab_priv(team)->active_port == port) { RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); + team_option_inst_set_change(ab_priv(team)->ap_opt_inst_info); + } +} + +static int ab_active_port_init(struct team *team, + struct team_option_inst_info *info) +{ + ab_priv(team)->ap_opt_inst_info = info; + return 0; } static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) @@ -88,6 +98,7 @@ static const struct team_option ab_options[] = { { .name = "activeport", .type = TEAM_OPTION_TYPE_U32, + .init = ab_active_port_init, .getter = ab_active_port_get, .setter = ab_active_port_set, }, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2917a86f4c43..b6f45c5d84d5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1200,6 +1200,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) { skb_shinfo(skb)->destructor_arg = msg_control; skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } skb_reset_network_header(skb); diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index e889631161b8..346c032aa795 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h @@ -167,6 +167,20 @@ struct asix_data { u8 res; }; +struct asix_rx_fixup_info { + struct sk_buff *ax_skb; + u32 header; + u16 size; + bool split_head; +}; + +struct asix_common_private { + struct asix_rx_fixup_info rx_fixup_info; +}; + +/* ASIX specific flags */ +#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */ + int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data); @@ -176,7 +190,9 @@ int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data); -int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb); +int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, + struct asix_rx_fixup_info *rx); +int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 50d167330d38..f7f623a5390e 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -51,49 +51,89 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, value, index, data, size); } -int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff 
*skb, + struct asix_rx_fixup_info *rx) { int offset = 0; - while (offset + sizeof(u32) < skb->len) { - struct sk_buff *ax_skb; - u16 size; - u32 header = get_unaligned_le32(skb->data + offset); - - offset += sizeof(u32); - - /* get the packet length */ - size = (u16) (header & 0x7ff); - if (size != ((~header >> 16) & 0x07ff)) { - netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n"); - return 0; + while (offset + sizeof(u16) <= skb->len) { + u16 remaining = 0; + unsigned char *data; + + if (!rx->size) { + if ((skb->len - offset == sizeof(u16)) || + rx->split_head) { + if(!rx->split_head) { + rx->header = get_unaligned_le16( + skb->data + offset); + rx->split_head = true; + offset += sizeof(u16); + break; + } else { + rx->header |= (get_unaligned_le16( + skb->data + offset) + << 16); + rx->split_head = false; + offset += sizeof(u16); + } + } else { + rx->header = get_unaligned_le32(skb->data + + offset); + offset += sizeof(u32); + } + + /* get the packet length */ + rx->size = (u16) (rx->header & 0x7ff); + if (rx->size != ((~rx->header >> 16) & 0x7ff)) { + netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", + rx->header, offset); + rx->size = 0; + return 0; + } + rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, + rx->size); + if (!rx->ax_skb) + return 0; } - if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) || - (size + offset > skb->len)) { + if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n", - size); + rx->size); + kfree_skb(rx->ax_skb); return 0; } - ax_skb = netdev_alloc_skb_ip_align(dev->net, size); - if (!ax_skb) - return 0; - skb_put(ax_skb, size); - memcpy(ax_skb->data, skb->data + offset, size); - usbnet_skb_return(dev, ax_skb); + if (rx->size > skb->len - offset) { + remaining = rx->size - (skb->len - offset); + rx->size = skb->len - offset; + } + + data = skb_put(rx->ax_skb, rx->size); + memcpy(data, skb->data + offset, rx->size); + if (!remaining) + usbnet_skb_return(dev, rx->ax_skb); - offset += (size + 1) & 0xfffe; + offset += (rx->size + 1) & 0xfffe; + rx->size = remaining; } if (skb->len != offset) { - netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n", - skb->len); + netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", + skb->len, offset); return 0; } + return 1; } +int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb) +{ + struct asix_common_private *dp = dev->driver_priv; + struct asix_rx_fixup_info *rx = &dp->rx_fixup_info; + + return asix_rx_fixup_internal(dev, skb, rx); +} + struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { @@ -510,8 +550,8 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { /* Inherit standard device info */ usbnet_get_drvinfo(net, info); - strncpy (info->driver, DRIVER_NAME, sizeof info->driver); - strncpy (info->version, DRIVER_VERSION, sizeof info->version); + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); info->eedump_len = AX_EEPROM_LEN; } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 7a6e758f48e7..2205dbc8d32f 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -422,14 +422,25 @@ static const struct net_device_ops ax88772_netdev_ops = { static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) { - int ret, embd_phy; + int ret, embd_phy, i; u8 buf[ETH_ALEN]; u32 phyid; usbnet_get_endpoints(dev,intf); /* 
Get the MAC address */ - ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); + if (dev->driver_info->data & FLAG_EEPROM_MAC) { + for (i = 0; i < (ETH_ALEN >> 1); i++) { + ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i, + 0, 2, buf + i * 2); + if (ret < 0) + break; + } + } else { + ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, + 0, 0, ETH_ALEN, buf); + } + if (ret < 0) { netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; @@ -484,9 +495,19 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->rx_urb_size = 2048; } + dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL); + if (!dev->driver_priv) + return -ENOMEM; + return 0; } +static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + if (dev->driver_priv) + kfree(dev->driver_priv); +} + static const struct ethtool_ops ax88178_ethtool_ops = { .get_drvinfo = asix_get_drvinfo, .get_link = asix_get_link, @@ -818,6 +839,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf) dev->rx_urb_size = 2048; } + dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL); + if (!dev->driver_priv) + return -ENOMEM; + return 0; } @@ -864,22 +889,38 @@ static const struct driver_info hawking_uf200_info = { static const struct driver_info ax88772_info = { .description = "ASIX AX88772 USB 2.0 Ethernet", .bind = ax88772_bind, + .unbind = ax88772_unbind, .status = asix_status, .link_reset = ax88772_link_reset, .reset = ax88772_reset, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, - .rx_fixup = asix_rx_fixup, + .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, }; +static const struct driver_info ax88772b_info = { + .description = "ASIX AX88772B USB 2.0 Ethernet", + .bind = ax88772_bind, + .unbind = ax88772_unbind, + .status = asix_status, + .link_reset = ax88772_link_reset, + .reset = ax88772_reset, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | + FLAG_MULTI_PACKET, + .rx_fixup = asix_rx_fixup_common, + .tx_fixup = asix_tx_fixup, + .data = FLAG_EEPROM_MAC, +}; + static const struct driver_info ax88178_info = { .description = "ASIX AX88178 USB 2.0 Ethernet", .bind = ax88178_bind, + .unbind = ax88772_unbind, .status = asix_status, .link_reset = ax88178_link_reset, .reset = ax88178_reset, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, - .rx_fixup = asix_rx_fixup, + .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, }; @@ -953,7 +994,7 @@ static const struct usb_device_id products [] = { }, { // ASIX AX88772B 10/100 USB_DEVICE (0x0b95, 0x772b), - .driver_info = (unsigned long) &ax88772_info, + .driver_info = (unsigned long) &ax88772b_info, }, { // ASIX AX88772 10/100 USB_DEVICE (0x0b95, 0x7720), diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index c8e0aa85fb8e..d012203b0f29 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -35,6 +35,7 @@ struct ax88172a_private { u16 phy_addr; u16 oldmode; int use_embdphy; + struct asix_rx_fixup_info rx_fixup_info; }; /* MDIO read and write wrappers for phylib */ @@ -116,7 +117,6 @@ static int ax88172a_init_mdio(struct usbnet *dev) priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!priv->mdio->irq) { - netdev_err(dev->net, "Could not allocate mdio->irq\n"); ret = -ENOMEM; goto mfree; } @@ -235,10 +235,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) usbnet_get_endpoints(dev, intf); priv = kzalloc(sizeof(*priv), 
GFP_KERNEL); - if (!priv) { - netdev_err(dev->net, "Could not allocate memory for private data\n"); + if (!priv) return -ENOMEM; - } + dev->driver_priv = priv; /* Get the MAC address */ @@ -377,7 +376,7 @@ static int ax88172a_reset(struct usbnet *dev) priv->phydev = phy_connect(dev->net, priv->phy_name, &ax88172a_adjust_link, - 0, PHY_INTERFACE_MODE_MII); + PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phydev)) { netdev_err(dev->net, "Could not connect to PHY device %s\n", priv->phy_name); @@ -400,6 +399,14 @@ out: } +static int ax88172a_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + struct ax88172a_private *dp = dev->driver_priv; + struct asix_rx_fixup_info *rx = &dp->rx_fixup_info; + + return asix_rx_fixup_internal(dev, skb, rx); +} + const struct driver_info ax88172a_info = { .description = "ASIX AX88172A USB 2.0 Ethernet", .bind = ax88172a_bind, @@ -409,6 +416,6 @@ const struct driver_info ax88172a_info = { .status = ax88172a_status, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, - .rx_fixup = asix_rx_fixup, + .rx_fixup = ax88172a_rx_fixup, .tx_fixup = asix_tx_fixup, }; diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 18d9579123ea..8d5cac2d8e33 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -685,9 +685,9 @@ static void catc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct catc *catc = netdev_priv(dev); - strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); - strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); - usb_make_path (catc->usbdev, info->bus_info, sizeof info->bus_info); + strlcpy(info->driver, driver_name, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info)); } static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 3f3d12d766e7..57136dc1b887 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -615,6 +615,13 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, +/* AnyDATA ADU960S - handled by qmi_wwan */ +{ + USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* * WHITELIST!!! 
* diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 00d3b2d37828..4a8c25a22294 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -65,9 +65,9 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { struct usbnet *dev = netdev_priv(net); - strncpy(info->driver, dev->driver_name, sizeof(info->driver)); - strncpy(info->version, DRIVER_VERSION, sizeof(info->version)); - strncpy(info->fw_version, dev->driver_info->description, + strlcpy(info->driver, dev->driver_name, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, dev->driver_info->description, sizeof(info->fw_version)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } @@ -576,9 +576,14 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) if ((intf->num_altsetting == 2) && !usb_set_interface(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_MBIM) && - cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) - return -ENODEV; + CDC_NCM_COMM_ALTSETTING_MBIM)) { + if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) + return -ENODEV; + else + usb_set_interface(dev->udev, + intf->cur_altsetting->desc.bInterfaceNumber, + CDC_NCM_COMM_ALTSETTING_NCM); + } #endif /* NCM data altsetting is always 1 */ diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index d7e99445518e..174e5ecea4cc 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -118,7 +118,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0xc : 0x4); for (i = 0; i < DM_TIMEOUT; i++) { - u8 tmp; + u8 tmp = 0; udelay(1); ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp); @@ -161,7 +161,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu dm_write_reg(dev, DM_SHARED_CTRL, phy ? 
0x1a : 0x12); for (i = 0; i < DM_TIMEOUT; i++) { - u8 tmp; + u8 tmp = 0; udelay(1); ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index cd8ccb240f4b..41e5dfb5ee64 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2317,10 +2317,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, serial->rx_urb[i]->transfer_buffer_length = 0; serial->rx_data[i] = kzalloc(serial->rx_data_length, GFP_KERNEL); - if (!serial->rx_data[i]) { - dev_err(dev, "%s - Out of memory\n", __func__); + if (!serial->rx_data[i]) goto exit; - } } /* TX, allocate urb and initialize */ @@ -2336,15 +2334,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, serial->tx_buffer_count = 0; serial->tx_data_length = tx_size; serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL); - if (!serial->tx_data) { - dev_err(dev, "%s - Out of memory\n", __func__); + if (!serial->tx_data) goto exit; - } + serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); - if (!serial->tx_buffer) { - dev_err(dev, "%s - Out of memory\n", __func__); + if (!serial->tx_buffer) goto exit; - } return 0; exit: @@ -2580,10 +2575,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, } hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); - if (!hso_net->mux_bulk_rx_buf_pool[i]) { - dev_err(&interface->dev, "Could not allocate rx buf\n"); + if (!hso_net->mux_bulk_rx_buf_pool[i]) goto exit; - } } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) { @@ -2591,10 +2584,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, goto exit; } hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); - if (!hso_net->mux_bulk_tx_buf) { - dev_err(&interface->dev, "Could not allocate tx buf\n"); + if (!hso_net->mux_bulk_tx_buf) goto exit; - } add_net_device(hso_dev); @@ -2818,10 +2809,8 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface) mux->shared_intr_buf = kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), GFP_KERNEL); - if (!mux->shared_intr_buf) { - dev_err(&interface->dev, "Could not allocate intr buf?\n"); + if (!mux->shared_intr_buf) goto exit; - } mutex_init(&mux->shared_int_lock); diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index 92c49e0a59ec..0192073e53a3 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -159,7 +159,6 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf) } memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN); - memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN); return status; } diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index a0b5807b30d4..73051d10ead2 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -149,11 +149,9 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, DECLARE_WAITQUEUE(wait, current); buffer = kmalloc(size, GFP_KERNEL); - if (!buffer) { - netif_warn(pegasus, drv, pegasus->net, - "out of memory in %s\n", __func__); + if (!buffer) return -ENOMEM; - } + add_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); while (pegasus->flags & ETH_REGS_CHANGED) @@ -1074,8 +1072,9 @@ static void pegasus_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { pegasus_t *pegasus = netdev_priv(dev); - strncpy(info->driver, driver_name, sizeof(info->driver) - 1); - 
strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1); + + strlcpy(info->driver, driver_name, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info)); } @@ -1096,6 +1095,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { pegasus_t *pegasus = netdev_priv(dev); u8 reg78 = 0x04; + int ret; if (wol->wolopts & ~WOL_SUPPORTED) return -EINVAL; @@ -1110,7 +1110,12 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) else pegasus->eth_regs[0] &= ~0x10; pegasus->wolopts = wol->wolopts; - return set_register(pegasus, WakeupControl, reg78); + + ret = set_register(pegasus, WakeupControl, reg78); + if (!ret) + ret = device_set_wakeup_enable(&pegasus->usb->dev, + wol->wolopts); + return ret; } static inline void pegasus_reset_wol(struct net_device *dev) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 19d903598b0d..efb5c7c33a28 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -409,6 +409,13 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* ADU960S */ + USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 4a4335833c36..cc49aac70224 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -431,7 +431,6 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) goto halt_fail_and_release; } memcpy(net->dev_addr, bp, ETH_ALEN); - memcpy(net->perm_addr, bp, ETH_ALEN); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 5f39a3b225ef..a491d3a95393 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -776,9 +776,9 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf { rtl8150_t *dev = netdev_priv(netdev); - strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); - strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); - usb_make_path(dev->udev, info->bus_info, sizeof info->bus_info); + strlcpy(info->driver, driver_name, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 18dd4257ab17..79ab2435d9d3 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -459,11 +459,9 @@ static void sierra_net_kevent(struct work_struct *work) /* Query the modem for the LSI message */ buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL); - if (!buf) { - netdev_err(dev->net, - "failed to allocate buf for LS msg\n"); + if (!buf) return; - } + ifnum = priv->ifnum; len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_CDC_GET_ENCAPSULATED_RESPONSE, @@ -598,8 +596,8 @@ static void sierra_net_get_drvinfo(struct net_device *net, { /* Inherit standard device info */ usbnet_get_drvinfo(net, info); - strncpy(info->driver, 
driver_name, sizeof info->driver); - strncpy(info->version, DRIVER_VERSION, sizeof info->version); + strlcpy(info->driver, driver_name, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); } static u32 sierra_net_get_link(struct net_device *net) @@ -686,10 +684,8 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) } /* Initialize sierra private data */ priv = kzalloc(sizeof *priv, GFP_KERNEL); - if (!priv) { - dev_err(&dev->udev->dev, "No memory"); + if (!priv) return -ENOMEM; - } priv->usbnet = dev; priv->ifnum = ifacenum; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 251a3354a4b0..9abe51710f22 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -1393,13 +1393,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) } dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), - GFP_KERNEL); + GFP_KERNEL); pdata = (struct smsc75xx_priv *)(dev->data[0]); - if (!pdata) { - netdev_warn(dev->net, "Unable to allocate smsc75xx_priv\n"); + if (!pdata) return -ENOMEM; - } pdata->dev = dev; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 9b736701f854..ff4fa37dfd1d 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -55,6 +55,13 @@ #define FEATURE_PHY_NLP_CROSSOVER (0x02) #define FEATURE_AUTOSUSPEND (0x04) +#define SUSPEND_SUSPEND0 (0x01) +#define SUSPEND_SUSPEND1 (0x02) +#define SUSPEND_SUSPEND2 (0x04) +#define SUSPEND_SUSPEND3 (0x08) +#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ + SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) + struct smsc95xx_priv { u32 mac_cr; u32 hash_hi; @@ -62,6 +69,7 @@ struct smsc95xx_priv { u32 wolopts; spinlock_t mac_cr_lock; u8 features; + u8 suspend_flags; }; static bool turbo_mode = true; @@ -513,10 +521,8 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, u32 flow, afc_cfg = 0; int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); - if (ret < 0) { - netdev_warn(dev->net, "Error reading AFC_CFG\n"); + if (ret < 0) return ret; - } if (duplex == DUPLEX_FULL) { u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); @@ -541,16 +547,10 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, } ret = smsc95xx_write_reg(dev, FLOW, flow); - if (ret < 0) { - netdev_warn(dev->net, "Error writing FLOW\n"); - return ret; - } - - ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); if (ret < 0) - netdev_warn(dev->net, "Error writing AFC_CFG\n"); + return ret; - return ret; + return smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); } static int smsc95xx_link_reset(struct usbnet *dev) @@ -564,16 +564,12 @@ static int smsc95xx_link_reset(struct usbnet *dev) /* clear interrupt status */ ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); + if (ret < 0) return ret; - } ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); - if (ret < 0) { - netdev_warn(dev->net, "Error writing INT_STS\n"); + if (ret < 0) return ret; - } mii_check_media(mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); @@ -595,10 +591,8 @@ static int smsc95xx_link_reset(struct usbnet *dev) spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MAC_CR\n"); + if (ret < 0) return ret; - } ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); if (ret < 0) @@ -638,10 +632,8 @@ static 
int smsc95xx_set_features(struct net_device *netdev, int ret; ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret); + if (ret < 0) return ret; - } if (features & NETIF_F_HW_CSUM) read_buf |= Tx_COE_EN_; @@ -654,10 +646,8 @@ static int smsc95xx_set_features(struct net_device *netdev, read_buf &= ~Rx_COE_EN_; ret = smsc95xx_write_reg(dev, COE_CR, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf); return 0; @@ -800,16 +790,10 @@ static int smsc95xx_set_mac_address(struct usbnet *dev) int ret; ret = smsc95xx_write_reg(dev, ADDRL, addr_lo); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret); - return ret; - } - - ret = smsc95xx_write_reg(dev, ADDRH, addr_hi); if (ret < 0) - netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret); + return ret; - return ret; + return smsc95xx_write_reg(dev, ADDRH, addr_hi); } /* starts the TX path */ @@ -825,17 +809,11 @@ static int smsc95xx_start_tx_path(struct usbnet *dev) spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); + if (ret < 0) return ret; - } /* Enable Tx at SCSRs */ - ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_); - if (ret < 0) - netdev_warn(dev->net, "Failed to write TX_CFG: %d\n", ret); - - return ret; + return smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_); } /* Starts the Receive path */ @@ -843,17 +821,12 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm) { struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); unsigned long flags; - int ret; spin_lock_irqsave(&pdata->mac_cr_lock, flags); pdata->mac_cr |= MAC_CR_RXEN_; spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); - ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm); - if (ret < 0) - netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); - - return ret; + return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm); } static int smsc95xx_phy_initialize(struct usbnet *dev) @@ -910,19 +883,15 @@ static int smsc95xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n"); ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n"); + if (ret < 0) return ret; - } timeout = 0; do { msleep(10); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); + if (ret < 0) return ret; - } timeout++; } while ((read_buf & HW_CFG_LRST_) && (timeout < 100)); @@ -932,19 +901,15 @@ static int smsc95xx_reset(struct usbnet *dev) } ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret); + if (ret < 0) return ret; - } timeout = 0; do { msleep(10); ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret); + if (ret < 0) return ret; - } timeout++; } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100)); @@ -961,10 +926,8 @@ static int smsc95xx_reset(struct usbnet *dev) dev->net->dev_addr); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, ifup, 
dev->net, "Read Value from HW_CFG : 0x%08x\n", read_buf); @@ -972,16 +935,12 @@ static int smsc95xx_reset(struct usbnet *dev) read_buf |= HW_CFG_BIR_; ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n"); + if (ret < 0) return ret; - } ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n", @@ -1002,42 +961,32 @@ static int smsc95xx_reset(struct usbnet *dev) (ulong)dev->rx_urb_size); ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret); + if (ret < 0) return ret; - } ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, ifup, dev->net, "Read Value from BURST_CAP after writing: 0x%08x\n", read_buf); ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret); + if (ret < 0) return ret; - } ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, ifup, dev->net, "Read Value from BULK_IN_DLY after writing: 0x%08x\n", read_buf); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG: 0x%08x\n", read_buf); @@ -1051,69 +1000,51 @@ static int smsc95xx_reset(struct usbnet *dev) read_buf |= NET_IP_ALIGN << 9; ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); + if (ret < 0) return ret; - } ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing: 0x%08x\n", read_buf); ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret); + if (ret < 0) return ret; - } ret = smsc95xx_read_reg(dev, ID_REV, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); + if (ret < 0) return ret; - } netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf); /* Configure GPIO pins as LED outputs */ write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | LED_GPIO_CFG_FDX_LED; ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret); + if (ret < 0) return ret; - } /* Init Tx */ ret = smsc95xx_write_reg(dev, FLOW, 0); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret); + if (ret < 0) return ret; - } ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret); + if (ret < 0) return ret; - } /* Don't need mac_cr_lock during initialisation */ ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret); + if (ret < 0) return ret; - } /* Init Rx */ /* Set Vlan */ ret = 
smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write VLAN1: %d\n", ret); + if (ret < 0) return ret; - } /* Enable or disable checksum offload engines */ ret = smsc95xx_set_features(dev->net, dev->net->features); @@ -1131,19 +1062,15 @@ static int smsc95xx_reset(struct usbnet *dev) } ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret); + if (ret < 0) return ret; - } /* enable PHY interrupts */ read_buf |= INT_EP_CTL_PHY_INT_; ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret); + if (ret < 0) return ret; - } ret = smsc95xx_start_tx_path(dev); if (ret < 0) { @@ -1189,13 +1116,11 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) } dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv), - GFP_KERNEL); + GFP_KERNEL); pdata = (struct smsc95xx_priv *)(dev->data[0]); - if (!pdata) { - netdev_warn(dev->net, "Unable to allocate struct smsc95xx_priv\n"); + if (!pdata) return -ENOMEM; - } spin_lock_init(&pdata->mac_cr_lock); @@ -1213,10 +1138,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); + if (ret < 0) return ret; - } val >>= 16; if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) || @@ -1261,17 +1184,13 @@ static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) /* read to clear */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); + if (ret < 0) return ret; - } /* enable interrupt source */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_MASK\n"); + if (ret < 0) return ret; - } ret |= mask; @@ -1287,16 +1206,12 @@ static int smsc95xx_link_ok_nopm(struct usbnet *dev) /* first, a dummy read, needed to latch some MII phys */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_BMSR\n"); + if (ret < 0) return ret; - } ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_BMSR\n"); + if (ret < 0) return ret; - } return !!(ret & BMSR_LSTATUS); } @@ -1308,19 +1223,15 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev) int ret; ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + if (ret < 0) return ret; - } val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_)); val |= PM_CTL_SUS_MODE_0; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + if (ret < 0) return ret; - } /* clear wol status */ val &= ~PM_CTL_WUPS_; @@ -1331,15 +1242,13 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev) val |= PM_CTL_WUPS_ED_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + if (ret < 0) return ret; - } /* read back PM_CTRL */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + + pdata->suspend_flags |= SUSPEND_SUSPEND0; 
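For readers following the smsc95xx changes above: the patch adds SUSPEND_SUSPEND0..SUSPEND_SUSPEND3 bit flags, has each smsc95xx_enter_suspendN() helper record the mode it entered in pdata->suspend_flags, and then lets resume test SUSPEND_ALLMODES to decide whether wake-up sources still need clearing. The standalone C sketch below illustrates only that bookkeeping pattern; struct fake_priv, the printf() stand-ins for register writes, and main() are invented for the example and are not part of the patch.

    /* Illustrative sketch only -- not driver code.  Register accesses are
     * stubbed with printf() so the example builds and runs in userspace.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define SUSPEND_SUSPEND0  0x01
    #define SUSPEND_SUSPEND1  0x02
    #define SUSPEND_SUSPEND2  0x04
    #define SUSPEND_SUSPEND3  0x08
    #define SUSPEND_ALLMODES  (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
                               SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)

    struct fake_priv {
    	uint8_t suspend_flags;
    };

    static void enter_suspend2(struct fake_priv *p)
    {
    	/* stand-in for the PM_CTRL register write selecting SUSPEND2 */
    	printf("write PM_CTRL: SUS_MODE_2\n");
    	p->suspend_flags |= SUSPEND_SUSPEND2;
    }

    static void resume(struct fake_priv *p)
    {
    	uint8_t flags = p->suspend_flags;

    	/* clear first so a failed resume cannot leave stale mode bits */
    	p->suspend_flags = 0;
    	if (flags & SUSPEND_ALLMODES)
    		printf("clear WUCSR/PM_CTRL wake-up sources\n");
    }

    int main(void)
    {
    	struct fake_priv p = { 0 };

    	enter_suspend2(&p);
    	resume(&p);
    	return 0;
    }

The real driver applies the same ordering: smsc95xx_resume() snapshots and zeroes pdata->suspend_flags before touching WUCSR/PM_CTRL, matching the "do this first to ensure it's cleared even in error case" comment in the patch.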
return ret; } @@ -1360,10 +1269,8 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev) /* enable energy detect power-down mode */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n"); + if (ret < 0) return ret; - } ret |= MODE_CTRL_STS_EDPWRDOWN_; @@ -1371,52 +1278,133 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev) /* enter SUSPEND1 mode */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + if (ret < 0) return ret; - } val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_); val |= PM_CTL_SUS_MODE_1; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + if (ret < 0) return ret; - } /* clear wol status, enable energy detection */ val &= ~PM_CTL_WUPS_; val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_); ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + + pdata->suspend_flags |= SUSPEND_SUSPEND1; return ret; } static int smsc95xx_enter_suspend2(struct usbnet *dev) { + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + if (ret < 0) return ret; - } val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_); val |= PM_CTL_SUS_MODE_2; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + + pdata->suspend_flags |= SUSPEND_SUSPEND2; return ret; } +static int smsc95xx_enter_suspend3(struct usbnet *dev) +{ + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + u32 val; + int ret; + + ret = smsc95xx_read_reg_nopm(dev, RX_FIFO_INF, &val); + if (ret < 0) + return ret; + + if (val & 0xFFFF) { + netdev_info(dev->net, "rx fifo not empty in autosuspend\n"); + return -EBUSY; + } + + ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); + if (ret < 0) + return ret; + + val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_); + val |= PM_CTL_SUS_MODE_3 | PM_CTL_RES_CLR_WKP_STS; + + ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); + if (ret < 0) + return ret; + + /* clear wol status */ + val &= ~PM_CTL_WUPS_; + val |= PM_CTL_WUPS_WOL_; + + ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); + if (ret < 0) + return ret; + + pdata->suspend_flags |= SUSPEND_SUSPEND3; + + return 0; +} + +static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up) +{ + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + int ret; + + if (!netif_running(dev->net)) { + /* interface is ifconfig down so fully power down hw */ + netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n"); + return smsc95xx_enter_suspend2(dev); + } + + if (!link_up) { + /* link is down so enter EDPD mode, but only if device can + * reliably resume from it. 
This check should be redundant + * as current FEATURE_AUTOSUSPEND parts also support + * FEATURE_PHY_NLP_CROSSOVER but it's included for clarity */ + if (!(pdata->features & FEATURE_PHY_NLP_CROSSOVER)) { + netdev_warn(dev->net, "EDPD not supported\n"); + return -EBUSY; + } + + netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n"); + + /* enable PHY wakeup events for if cable is attached */ + ret = smsc95xx_enable_phy_wakeup_interrupts(dev, + PHY_INT_MASK_ANEG_COMP_); + if (ret < 0) { + netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); + return ret; + } + + netdev_info(dev->net, "entering SUSPEND1 mode\n"); + return smsc95xx_enter_suspend1(dev); + } + + /* enable PHY wakeup events so we remote wakeup if cable is pulled */ + ret = smsc95xx_enable_phy_wakeup_interrupts(dev, + PHY_INT_MASK_LINK_DOWN_); + if (ret < 0) { + netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); + return ret; + } + + netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n"); + return smsc95xx_enter_suspend3(dev); +} + static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); @@ -1424,15 +1412,35 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) u32 val, link_up; int ret; + /* TODO: don't indicate this feature to usb framework if + * our current hardware doesn't have the capability + */ + if ((message.event == PM_EVENT_AUTO_SUSPEND) && + (!(pdata->features & FEATURE_AUTOSUSPEND))) { + netdev_warn(dev->net, "autosuspend not supported\n"); + return -EBUSY; + } + ret = usbnet_suspend(intf, message); if (ret < 0) { netdev_warn(dev->net, "usbnet_suspend error\n"); return ret; } + if (pdata->suspend_flags) { + netdev_warn(dev->net, "error during last resume\n"); + pdata->suspend_flags = 0; + } + /* determine if link is up using only _nopm functions */ link_up = smsc95xx_link_ok_nopm(dev); + if (message.event == PM_EVENT_AUTO_SUSPEND) { + ret = smsc95xx_autosuspend(dev, link_up); + goto done; + } + + /* if we get this far we're not autosuspending */ /* if no wol options set, or if link is down and we're not waking on * PHY activity, enter lowest power SUSPEND2 mode */ @@ -1442,32 +1450,24 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) /* disable energy detect (link up) & wake up events */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); + if (ret < 0) goto done; - } val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_); ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); + if (ret < 0) goto done; - } ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + if (ret < 0) goto done; - } val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_); ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + if (ret < 0) goto done; - } ret = smsc95xx_enter_suspend2(dev); goto done; @@ -1565,7 +1565,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) for (i = 0; i < (wuff_filter_count * 4); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]); if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); kfree(filter_mask); goto done; } @@ -1574,67 +1573,51 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) for (i = 0; i < (wuff_filter_count / 4); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, 
command[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); + if (ret < 0) goto done; - } } for (i = 0; i < (wuff_filter_count / 4); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); + if (ret < 0) goto done; - } } for (i = 0; i < (wuff_filter_count / 2); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); + if (ret < 0) goto done; - } } /* clear any pending pattern match packet status */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); + if (ret < 0) goto done; - } val |= WUCSR_WUFR_; ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); + if (ret < 0) goto done; - } } if (pdata->wolopts & WAKE_MAGIC) { /* clear any pending magic packet status */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); + if (ret < 0) goto done; - } val |= WUCSR_MPR_; ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); + if (ret < 0) goto done; - } } /* enable/disable wakeup sources */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); + if (ret < 0) goto done; - } if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) { netdev_info(dev->net, "enabling pattern match wakeup\n"); @@ -1653,17 +1636,13 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) } ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); + if (ret < 0) goto done; - } /* enable wol wakeup source */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + if (ret < 0) goto done; - } val |= PM_CTL_WOL_EN_; @@ -1672,10 +1651,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) val |= PM_CTL_ED_EN_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + if (ret < 0) goto done; - } /* enable receiver to enable frame reception */ smsc95xx_start_rx_path(dev, 1); @@ -1694,42 +1671,40 @@ static int smsc95xx_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + u8 suspend_flags = pdata->suspend_flags; int ret; u32 val; BUG_ON(!dev); - if (pdata->wolopts) { + netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags); + + /* do this first to ensure it's cleared even in error case */ + pdata->suspend_flags = 0; + + if (suspend_flags & SUSPEND_ALLMODES) { /* clear wake-up sources */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); + if (ret < 0) return ret; - } val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_); ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); + if (ret < 0) return ret; - } /* clear wake-up status */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + if (ret < 0) return ret; - } val &= ~PM_CTL_WOL_EN_; val |= PM_CTL_WUPS_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, 
"Error writing PM_CTRL\n"); + if (ret < 0) return ret; - } } ret = usbnet_resume(intf); @@ -1891,6 +1866,26 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, return skb; } +static int smsc95xx_manage_power(struct usbnet *dev, int on) +{ + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + + dev->intf->needs_remote_wakeup = on; + + if (pdata->features & FEATURE_AUTOSUSPEND) + return 0; + + /* this chip revision doesn't support autosuspend */ + netdev_info(dev->net, "hardware doesn't support USB autosuspend\n"); + + if (on) + usb_autopm_get_interface_no_resume(dev->intf); + else + usb_autopm_put_interface(dev->intf); + + return 0; +} + static const struct driver_info smsc95xx_info = { .description = "smsc95xx USB 2.0 Ethernet", .bind = smsc95xx_bind, @@ -1900,6 +1895,7 @@ static const struct driver_info smsc95xx_info = { .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, + .manage_power = smsc95xx_manage_power, .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, }; @@ -2007,6 +2003,7 @@ static struct usb_driver smsc95xx_driver = { .reset_resume = smsc95xx_resume, .disconnect = usbnet_disconnect, .disable_hub_initiated_lpm = 1, + .supports_autosuspend = 1, }; module_usb_driver(smsc95xx_driver); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 5e33606c1366..51f3192f3931 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1813,11 +1813,8 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, } req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); - if (!req) { - netdev_err(dev->net, "Failed to allocate memory for %s\n", - __func__); + if (!req) goto fail_free_buf; - } req->bRequestType = reqtype; req->bRequest = cmd; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 95814d9747ef..07a4af0aa3dc 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -25,18 +25,15 @@ #define MIN_MTU 68 /* Min L3 MTU */ #define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */ -struct veth_net_stats { - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; - u64 rx_dropped; +struct pcpu_vstats { + u64 packets; + u64 bytes; struct u64_stats_sync syncp; }; struct veth_priv { - struct net_device *peer; - struct veth_net_stats __percpu *stats; + struct net_device __rcu *peer; + atomic64_t dropped; }; /* @@ -92,10 +89,10 @@ static int veth_get_sset_count(struct net_device *dev, int sset) static void veth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { - struct veth_priv *priv; + struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer = rtnl_dereference(priv->peer); - priv = netdev_priv(dev); - data[0] = priv->peer->ifindex; + data[0] = peer ? 
peer->ifindex : 0; } static const struct ethtool_ops veth_ethtool_ops = { @@ -107,50 +104,37 @@ static const struct ethtool_ops veth_ethtool_ops = { .get_ethtool_stats = veth_get_ethtool_stats, }; -/* - * xmit - */ - static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { - struct net_device *rcv = NULL; - struct veth_priv *priv, *rcv_priv; - struct veth_net_stats *stats, *rcv_stats; - int length; - - priv = netdev_priv(dev); - rcv = priv->peer; - rcv_priv = netdev_priv(rcv); - - stats = this_cpu_ptr(priv->stats); - rcv_stats = this_cpu_ptr(rcv_priv->stats); - + struct veth_priv *priv = netdev_priv(dev); + struct net_device *rcv; + int length = skb->len; + + rcu_read_lock(); + rcv = rcu_dereference(priv->peer); + if (unlikely(!rcv)) { + kfree_skb(skb); + goto drop; + } /* don't change ip_summed == CHECKSUM_PARTIAL, as that - will cause bad checksum on forwarded packets */ + * will cause bad checksum on forwarded packets + */ if (skb->ip_summed == CHECKSUM_NONE && rcv->features & NETIF_F_RXCSUM) skb->ip_summed = CHECKSUM_UNNECESSARY; - length = skb->len; - if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) - goto rx_drop; + if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) { + struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); - u64_stats_update_begin(&stats->syncp); - stats->tx_bytes += length; - stats->tx_packets++; - u64_stats_update_end(&stats->syncp); - - u64_stats_update_begin(&rcv_stats->syncp); - rcv_stats->rx_bytes += length; - rcv_stats->rx_packets++; - u64_stats_update_end(&rcv_stats->syncp); - - return NETDEV_TX_OK; - -rx_drop: - u64_stats_update_begin(&rcv_stats->syncp); - rcv_stats->rx_dropped++; - u64_stats_update_end(&rcv_stats->syncp); + u64_stats_update_begin(&stats->syncp); + stats->bytes += length; + stats->packets++; + u64_stats_update_end(&stats->syncp); + } else { +drop: + atomic64_inc(&priv->dropped); + } + rcu_read_unlock(); return NETDEV_TX_OK; } @@ -158,47 +142,63 @@ rx_drop: * general routines */ -static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int cpu; + result->packets = 0; + result->bytes = 0; for_each_possible_cpu(cpu) { - struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu); - u64 rx_packets, rx_bytes, rx_dropped; - u64 tx_packets, tx_bytes; + struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu); + u64 packets, bytes; unsigned int start; do { start = u64_stats_fetch_begin_bh(&stats->syncp); - rx_packets = stats->rx_packets; - tx_packets = stats->tx_packets; - rx_bytes = stats->rx_bytes; - tx_bytes = stats->tx_bytes; - rx_dropped = stats->rx_dropped; + packets = stats->packets; + bytes = stats->bytes; } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); - tot->rx_packets += rx_packets; - tot->tx_packets += tx_packets; - tot->rx_bytes += rx_bytes; - tot->tx_bytes += tx_bytes; - tot->rx_dropped += rx_dropped; + result->packets += packets; + result->bytes += bytes; } + return atomic64_read(&priv->dropped); +} + +static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) +{ + struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer; + struct pcpu_vstats one; + + tot->tx_dropped = veth_stats_one(&one, dev); + tot->tx_bytes = one.bytes; + tot->tx_packets = one.packets; + + rcu_read_lock(); + peer = rcu_dereference(priv->peer); + if (peer) { + tot->rx_dropped = veth_stats_one(&one, 
peer); + tot->rx_bytes = one.bytes; + tot->rx_packets = one.packets; + } + rcu_read_unlock(); return tot; } static int veth_open(struct net_device *dev) { - struct veth_priv *priv; + struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer = rtnl_dereference(priv->peer); - priv = netdev_priv(dev); - if (priv->peer == NULL) + if (!peer) return -ENOTCONN; - if (priv->peer->flags & IFF_UP) { + if (peer->flags & IFF_UP) { netif_carrier_on(dev); - netif_carrier_on(priv->peer); + netif_carrier_on(peer); } return 0; } @@ -206,9 +206,11 @@ static int veth_open(struct net_device *dev) static int veth_close(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer = rtnl_dereference(priv->peer); netif_carrier_off(dev); - netif_carrier_off(priv->peer); + if (peer) + netif_carrier_off(peer); return 0; } @@ -228,24 +230,16 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu) static int veth_dev_init(struct net_device *dev) { - struct veth_net_stats __percpu *stats; - struct veth_priv *priv; - - stats = alloc_percpu(struct veth_net_stats); - if (stats == NULL) + dev->vstats = alloc_percpu(struct pcpu_vstats); + if (!dev->vstats) return -ENOMEM; - priv = netdev_priv(dev); - priv->stats = stats; return 0; } static void veth_dev_free(struct net_device *dev) { - struct veth_priv *priv; - - priv = netdev_priv(dev); - free_percpu(priv->stats); + free_percpu(dev->vstats); free_netdev(dev); } @@ -259,6 +253,10 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; +#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ + NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX) + static void veth_setup(struct net_device *dev) { ether_setup(dev); @@ -269,9 +267,10 @@ static void veth_setup(struct net_device *dev) dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; + dev->features |= VETH_FEATURES; dev->destructor = veth_dev_free; - dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; + dev->hw_features = VETH_FEATURES; } /* @@ -396,10 +395,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, */ priv = netdev_priv(dev); - priv->peer = peer; + rcu_assign_pointer(priv->peer, peer); priv = netdev_priv(peer); - priv->peer = dev; + rcu_assign_pointer(priv->peer, dev); return 0; err_register_dev: @@ -420,10 +419,20 @@ static void veth_dellink(struct net_device *dev, struct list_head *head) struct net_device *peer; priv = netdev_priv(dev); - peer = priv->peer; + peer = rtnl_dereference(priv->peer); + /* Note : dellink() is called from default_device_exit_batch(), + * before a rcu_synchronize() point. The devices are guaranteed + * not being freed before one RCU grace period. 
+ */ + RCU_INIT_POINTER(priv->peer, NULL); unregister_netdevice_queue(dev, head); - unregister_netdevice_queue(peer, head); + + if (peer) { + priv = netdev_priv(peer); + RCU_INIT_POINTER(priv->peer, NULL); + unregister_netdevice_queue(peer, head); + } } static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 35c00c5ea02a..192c91c8e799 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -227,6 +227,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page, skb->len += size; skb->truesize += PAGE_SIZE; skb_shinfo(skb)->nr_frags++; + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; *len -= size; } @@ -760,19 +761,77 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +/* + * Send command via the control virtqueue and check status. Commands + * supported by the hypervisor, as indicated by feature bits, should + * never fail unless improperly formated. + */ +static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, + struct scatterlist *data, int out, int in) +{ + struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; + struct virtio_net_ctrl_hdr ctrl; + virtio_net_ctrl_ack status = ~0; + unsigned int tmp; + int i; + + /* Caller should know better */ + BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) || + (out + in > VIRTNET_SEND_COMMAND_SG_MAX)); + + out++; /* Add header */ + in++; /* Add return status */ + + ctrl.class = class; + ctrl.cmd = cmd; + + sg_init_table(sg, out + in); + + sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); + for_each_sg(data, s, out + in - 2, i) + sg_set_buf(&sg[i + 1], sg_virt(s), s->length); + sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); + + BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0); + + virtqueue_kick(vi->cvq); + + /* Spin for a response, the kick causes an ioport write, trapping + * into the hypervisor, so the request should be handled immediately. + */ + while (!virtqueue_get_buf(vi->cvq, &tmp)) + cpu_relax(); + + return status == VIRTIO_NET_OK; +} + static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; + struct sockaddr *addr = p; + struct scatterlist sg; - ret = eth_mac_addr(dev, p); + ret = eth_prepare_mac_addr_change(dev, p); if (ret) return ret; - if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { + sg_init_one(&sg, addr->sa_data, dev->addr_len); + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, + VIRTIO_NET_CTRL_MAC_ADDR_SET, + &sg, 1, 0)) { + dev_warn(&vdev->dev, + "Failed to set mac address by vq command.\n"); + return -EINVAL; + } + } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), - dev->dev_addr, dev->addr_len); + addr->sa_data, dev->addr_len); + } + + eth_commit_mac_addr_change(dev, p); return 0; } @@ -826,51 +885,6 @@ static void virtnet_netpoll(struct net_device *dev) } #endif -/* - * Send command via the control virtqueue and check status. Commands - * supported by the hypervisor, as indicated by feature bits, should - * never fail unless improperly formated. 
- */ -static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, - struct scatterlist *data, int out, int in) -{ - struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; - struct virtio_net_ctrl_hdr ctrl; - virtio_net_ctrl_ack status = ~0; - unsigned int tmp; - int i; - - /* Caller should know better */ - BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) || - (out + in > VIRTNET_SEND_COMMAND_SG_MAX)); - - out++; /* Add header */ - in++; /* Add return status */ - - ctrl.class = class; - ctrl.cmd = cmd; - - sg_init_table(sg, out + in); - - sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); - for_each_sg(data, s, out + in - 2, i) - sg_set_buf(&sg[i + 1], sg_virt(s), s->length); - sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); - - BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0); - - virtqueue_kick(vi->cvq); - - /* - * Spin for a response, the kick causes an ioport write, trapping - * into the hypervisor, so the request should be handled immediately. - */ - while (!virtqueue_get_buf(vi->cvq, &tmp)) - cpu_relax(); - - return status == VIRTIO_NET_OK; -} - static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); @@ -959,10 +973,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; - if (!buf) { - dev_warn(&dev->dev, "No memory for MAC address buffer\n"); + if (!buf) return; - } sg_init_table(sg, 2); @@ -1706,6 +1718,7 @@ static unsigned int features[] = { VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, + VIRTIO_NET_F_CTRL_MAC_ADDR, }; static struct virtio_driver virtio_net_driver = { diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 12c6440d1649..ffb97b2a15a0 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -43,11 +43,7 @@ static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = { MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); -static atomic_t devices_found; - -#define VMXNET3_MAX_DEVICES 10 static int enable_mq = 1; -static int irq_share_mode; static void vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); @@ -152,8 +148,8 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) adapter->link_speed = ret >> 16; if (ret & 1) { /* Link is up. 
*/ - printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", - adapter->netdev->name, adapter->link_speed); + netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", + adapter->link_speed); netif_carrier_on(adapter->netdev); if (affectTxQueue) { @@ -162,8 +158,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) adapter); } } else { - printk(KERN_INFO "%s: NIC Link is Down\n", - adapter->netdev->name); + netdev_info(adapter->netdev, "NIC Link is Down\n"); netif_carrier_off(adapter->netdev); if (affectTxQueue) { @@ -508,8 +503,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, * sizeof(struct Vmxnet3_TxDesc), &tq->tx_ring.basePA); if (!tq->tx_ring.base) { - printk(KERN_ERR "%s: failed to allocate tx ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate tx ring\n"); goto err; } @@ -518,8 +512,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, sizeof(struct Vmxnet3_TxDataDesc), &tq->data_ring.basePA); if (!tq->data_ring.base) { - printk(KERN_ERR "%s: failed to allocate data ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate data ring\n"); goto err; } @@ -528,8 +521,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, sizeof(struct Vmxnet3_TxCompDesc), &tq->comp_ring.basePA); if (!tq->comp_ring.base) { - printk(KERN_ERR "%s: failed to allocate tx comp ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); goto err; } @@ -578,15 +570,14 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { if (rbi->skb == NULL) { - rbi->skb = dev_alloc_skb(rbi->len + - NET_IP_ALIGN); + rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, + rbi->len, + GFP_KERNEL); if (unlikely(rbi->skb == NULL)) { rq->stats.rx_buf_alloc_failure++; break; } - rbi->skb->dev = adapter->netdev; - skb_reserve(rbi->skb, NET_IP_ALIGN); rbi->dma_addr = pci_map_single(adapter->pdev, rbi->skb->data, rbi->len, PCI_DMA_FROMDEVICE); @@ -627,12 +618,10 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, num_allocated++; vmxnet3_cmd_ring_adv_next2fill(ring); } - rq->uncommitted[ring_idx] += num_allocated; - dev_dbg(&adapter->netdev->dev, - "alloc_rx_buf: %d allocated, next2fill %u, next2comp " - "%u, uncommitted %u\n", num_allocated, ring->next2fill, - ring->next2comp, rq->uncommitted[ring_idx]); + netdev_dbg(adapter->netdev, + "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n", + num_allocated, ring->next2fill, ring->next2comp); /* so that the device can distinguish a full ring and an empty ring */ BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); @@ -689,7 +678,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi = tq->buf_info + tq->tx_ring.next2fill; tbi->map_type = VMXNET3_MAP_NONE; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: 0x%Lx 0x%x 0x%x\n", tq->tx_ring.next2fill, le64_to_cpu(ctx->sop_txd->txd.addr), @@ -729,7 +718,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, gdesc->dword[2] = cpu_to_le32(dw2); gdesc->dword[3] = 0; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: 0x%Lx 0x%x 0x%x\n", tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); @@ -769,7 +758,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, gdesc->dword[2] = cpu_to_le32(dw2); gdesc->dword[3] = 0; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: 0x%llu %u 
%u\n", tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); @@ -869,7 +858,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, tdd = tq->data_ring.base + tq->tx_ring.next2fill; memcpy(tdd->data, skb->data, ctx->copy_size); - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "copy %u bytes to dataRing[%u]\n", ctx->copy_size, tq->tx_ring.next2fill); return 1; @@ -975,7 +964,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { tq->stats.tx_ring_full++; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "tx queue stopped on %s, next2comp %u" " next2fill %u\n", adapter->netdev->name, tq->tx_ring.next2comp, tq->tx_ring.next2fill); @@ -1058,7 +1047,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, (struct Vmxnet3_TxDesc *)ctx.sop_txd); gdesc = ctx.sop_txd; #endif - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", (u32)(ctx.sop_txd - tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), @@ -1211,7 +1200,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, if (unlikely(rcd->len == 0)) { /* Pretend the rx buffer is skipped. */ BUG_ON(!(rcd->sop && rcd->eop)); - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "rxRing[%u][%u] 0 length\n", ring_idx, idx); goto rcd_done; @@ -1219,7 +1208,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = false; ctx->skb = rbi->skb; - new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN); + new_skb = netdev_alloc_skb_ip_align(adapter->netdev, + rbi->len); if (new_skb == NULL) { /* Skb allocation failed, do not handover this * skb to stack. Reuse it. Drop the existing pkt @@ -1234,11 +1224,14 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, PCI_DMA_FROMDEVICE); +#ifdef VMXNET3_RSS + if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && + (adapter->netdev->features & NETIF_F_RXHASH)) + ctx->skb->rxhash = le32_to_cpu(rcd->rssHash); +#endif skb_put(ctx->skb, rcd->len); /* Immediate refill */ - new_skb->dev = adapter->netdev; - skb_reserve(new_skb, NET_IP_ALIGN); rbi->skb = new_skb; rbi->dma_addr = pci_map_single(adapter->pdev, rbi->skb->data, rbi->len, @@ -1331,7 +1324,6 @@ rcd_done: VMXNET3_WRITE_BAR0_REG(adapter, rxprod_reg[ring_idx] + rq->qid * 8, ring->next2fill); - rq->uncommitted[ring_idx] = 0; } vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); @@ -1376,7 +1368,6 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; rq->rx_ring[ring_idx].next2fill = rq->rx_ring[ring_idx].next2comp = 0; - rq->uncommitted[ring_idx] = 0; } rq->comp_ring.gen = VMXNET3_INIT_GEN; @@ -1457,7 +1448,6 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, /* reset internal state and allocate buffers for both rings */ for (i = 0; i < 2; i++) { rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; - rq->uncommitted[i] = 0; memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc)); @@ -1516,8 +1506,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, &rq->rx_ring[i].basePA); if (!rq->rx_ring[i].base) { - printk(KERN_ERR "%s: failed to allocate rx ring %d\n", - adapter->netdev->name, i); + netdev_err(adapter->netdev, + "failed to allocate rx ring %d\n", i); goto err; } } @@ -1526,8 +1516,7 @@ vmxnet3_rq_create(struct 
vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, &rq->comp_ring.basePA); if (!rq->comp_ring.base) { - printk(KERN_ERR "%s: failed to allocate rx comp ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); goto err; } @@ -1819,9 +1808,10 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) adapter->rx_queue[i].name, &(adapter->rx_queue[i])); if (err) { - printk(KERN_ERR "Failed to request irq for MSIX" - ", %s, error %d\n", - adapter->rx_queue[i].name, err); + netdev_err(adapter->netdev, + "Failed to request irq for MSIX, " + "%s, error %d\n", + adapter->rx_queue[i].name, err); return err; } @@ -1850,8 +1840,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) #endif intr->num_intrs = vector + 1; if (err) { - printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" - ":%d\n", adapter->netdev->name, intr->type, err); + netdev_err(adapter->netdev, + "Failed to request irq (intr type:%d), error %d\n", + intr->type, err); } else { /* Number of rx queues will not change after this */ for (i = 0; i < adapter->num_rx_queues; i++) { @@ -1872,9 +1863,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) adapter->rx_queue[0].comp_ring.intr_idx = 0; } - printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " - "allocated\n", adapter->netdev->name, intr->type, - intr->mask_mode, intr->num_intrs); + netdev_info(adapter->netdev, + "intr type %u, mode %u, %u vectors allocated\n", + intr->type, intr->mask_mode, intr->num_intrs); } return err; @@ -2040,8 +2031,8 @@ vmxnet3_set_mc(struct net_device *netdev) rxConf->mfTablePA = cpu_to_le64(virt_to_phys( new_table)); } else { - printk(KERN_INFO "%s: failed to copy mcast list" - ", setting ALL_MULTI\n", netdev->name); + netdev_info(netdev, "failed to copy mcast list" + ", setting ALL_MULTI\n"); new_mode |= VMXNET3_RXM_ALL_MULTI; } } @@ -2169,6 +2160,14 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) if (adapter->rss) { struct UPT1_RSSConf *rssConf = adapter->rss_conf; + static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { + 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, + 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, + 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, + 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, + 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, + }; + devRead->misc.uptFeatures |= UPT1_F_RSS; devRead->misc.numRxQueues = adapter->num_rx_queues; rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | @@ -2178,7 +2177,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; - get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize); + memcpy(rssConf->hashKey, rss_key, sizeof(rss_key)); + for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = ethtool_rxfh_indir_default( i, adapter->num_rx_queues); @@ -2216,7 +2216,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) u32 ret; unsigned long flags; - dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," + netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," " ring sizes %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, adapter->rx_buf_per_pkt, adapter->tx_queue[0].tx_ring.size, @@ -2226,15 +2226,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) vmxnet3_tq_init_all(adapter); err = vmxnet3_rq_init_all(adapter); if (err) 
{ - printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", - adapter->netdev->name, err); + netdev_err(adapter->netdev, + "Failed to init rx queue error %d\n", err); goto rq_err; } err = vmxnet3_request_irqs(adapter); if (err) { - printk(KERN_ERR "Failed to setup irq for %s: error %d\n", - adapter->netdev->name, err); + netdev_err(adapter->netdev, + "Failed to setup irq for error %d\n", err); goto irq_err; } @@ -2251,8 +2251,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) spin_unlock_irqrestore(&adapter->cmd_lock, flags); if (ret != 0) { - printk(KERN_ERR "Failed to activate dev %s: error %u\n", - adapter->netdev->name, ret); + netdev_err(adapter->netdev, + "Failed to activate dev: error %u\n", ret); err = -EINVAL; goto activate_err; } @@ -2367,23 +2367,22 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) err = pci_enable_device(pdev); if (err) { - printk(KERN_ERR "Failed to enable adapter %s: error %d\n", - pci_name(pdev), err); + dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err); return err; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - printk(KERN_ERR "pci_set_consistent_dma_mask failed " - "for adapter %s\n", pci_name(pdev)); + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed\n"); err = -EIO; goto err_set_mask; } *dma64 = true; } else { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - printk(KERN_ERR "pci_set_dma_mask failed for adapter " - "%s\n", pci_name(pdev)); + dev_err(&pdev->dev, + "pci_set_dma_mask failed\n"); err = -EIO; goto err_set_mask; } @@ -2393,8 +2392,8 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) err = pci_request_selected_regions(pdev, (1 << 2) - 1, vmxnet3_driver_name); if (err) { - printk(KERN_ERR "Failed to request region for adapter %s: " - "error %d\n", pci_name(pdev), err); + dev_err(&pdev->dev, + "Failed to request region for adapter: error %d\n", err); goto err_set_mask; } @@ -2404,8 +2403,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) mmio_len = pci_resource_len(pdev, 0); adapter->hw_addr0 = ioremap(mmio_start, mmio_len); if (!adapter->hw_addr0) { - printk(KERN_ERR "Failed to map bar0 for adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to map bar0\n"); err = -EIO; goto err_ioremap; } @@ -2414,8 +2412,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) mmio_len = pci_resource_len(pdev, 1); adapter->hw_addr1 = ioremap(mmio_start, mmio_len); if (!adapter->hw_addr1) { - printk(KERN_ERR "Failed to map bar1 for adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to map bar1\n"); err = -EIO; goto err_bar1; } @@ -2522,12 +2519,14 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, err = vmxnet3_rq_create(rq, adapter); if (err) { if (i == 0) { - printk(KERN_ERR "Could not allocate any rx" - "queues. Aborting.\n"); + netdev_err(adapter->netdev, + "Could not allocate any rx queues. " + "Aborting.\n"); goto queue_err; } else { - printk(KERN_INFO "Number of rx queues changed " - "to : %d.\n", i); + netdev_info(adapter->netdev, + "Number of rx queues changed " + "to : %d.\n", i); adapter->num_rx_queues = i; err = 0; break; @@ -2640,15 +2639,17 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) vmxnet3_adjust_rx_ring_size(adapter); err = vmxnet3_rq_create_all(adapter); if (err) { - printk(KERN_ERR "%s: failed to re-create rx queues," - " error %d. 
Closing it.\n", netdev->name, err); + netdev_err(netdev, + "failed to re-create rx queues, " + " error %d. Closing it.\n", err); goto out; } err = vmxnet3_activate_dev(adapter); if (err) { - printk(KERN_ERR "%s: failed to re-activate, error %d. " - "Closing it\n", netdev->name, err); + netdev_err(netdev, + "failed to re-activate, error %d. " + "Closing it\n", err); goto out; } } @@ -2676,10 +2677,6 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) netdev->vlan_features = netdev->hw_features & ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER; - - netdev_info(adapter->netdev, - "features: sg csum vlan jf tso tsoIPv6 lro%s\n", - dma64 ? " highDMA" : ""); } @@ -2722,7 +2719,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, adapter->intr.num_intrs = vectors; return 0; } else if (err < 0) { - netdev_err(adapter->netdev, + dev_err(&adapter->netdev->dev, "Failed to enable MSI-X, error: %d\n", err); vectors = 0; } else if (err < vector_threshold) { @@ -2731,15 +2728,16 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, /* If fails to enable required number of MSI-x vectors * try enabling minimum number of vectors required. */ - netdev_err(adapter->netdev, - "Failed to enable %d MSI-X, trying %d instead\n", + dev_err(&adapter->netdev->dev, + "Failed to enable %d MSI-X, trying %d instead\n", vectors, vector_threshold); vectors = vector_threshold; } } - netdev_info(adapter->netdev, - "Number of MSI-X interrupts which can be allocated are lower than min threshold required.\n"); + dev_info(&adapter->pdev->dev, + "Number of MSI-X interrupts which can be allocated " + "is lower than min threshold required.\n"); return err; } @@ -2794,7 +2792,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE || adapter->num_rx_queues != 1) { adapter->share_intr = VMXNET3_INTR_TXSHARE; - printk(KERN_ERR "Number of rx queues : 1\n"); + netdev_err(adapter->netdev, + "Number of rx queues : 1\n"); adapter->num_rx_queues = 1; adapter->intr.num_intrs = VMXNET3_LINUX_MIN_MSIX_VECT; @@ -2805,9 +2804,9 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) return; /* If we cannot allocate MSIx vectors use only one rx queue */ - netdev_info(adapter->netdev, - "Failed to enable MSI-X, error %d . Limiting #rx queues to 1, try MSI.\n", - err); + dev_info(&adapter->pdev->dev, + "Failed to enable MSI-X, error %d. 
" + "Limiting #rx queues to 1, try MSI.\n", err); adapter->intr.type = VMXNET3_IT_MSI; } @@ -2824,7 +2823,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #endif /* CONFIG_PCI_MSI */ adapter->num_rx_queues = 1; - printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n"); + dev_info(&adapter->netdev->dev, + "Using INTx interrupt, #Rx queues: 1.\n"); adapter->intr.type = VMXNET3_IT_INTX; /* INT-X related setting */ @@ -2850,7 +2850,7 @@ vmxnet3_tx_timeout(struct net_device *netdev) struct vmxnet3_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); + netdev_err(adapter->netdev, "tx hang\n"); schedule_work(&adapter->work); netif_wake_queue(adapter->netdev); } @@ -2870,12 +2870,12 @@ vmxnet3_reset_work(struct work_struct *data) /* if the device is closed, we must leave it alone */ rtnl_lock(); if (netif_running(adapter->netdev)) { - printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); + netdev_notice(adapter->netdev, "resetting\n"); vmxnet3_quiesce_dev(adapter); vmxnet3_reset_dev(adapter); vmxnet3_activate_dev(adapter); } else { - printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); + netdev_info(adapter->netdev, "already closed\n"); } rtnl_unlock(); @@ -2934,8 +2934,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, num_tx_queues = rounddown_pow_of_two(num_tx_queues); netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), max(num_tx_queues, num_rx_queues)); - printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n", - num_tx_queues, num_rx_queues); + dev_info(&pdev->dev, + "# of Tx queues : %d, # of Rx queues : %d\n", + num_tx_queues, num_rx_queues); if (!netdev) return -ENOMEM; @@ -2950,8 +2951,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, sizeof(struct Vmxnet3_DriverShared), &adapter->shared_pa); if (!adapter->shared) { - printk(KERN_ERR "Failed to allocate memory for %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to allocate memory\n"); err = -ENOMEM; goto err_alloc_shared; } @@ -2965,8 +2965,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, &adapter->queue_desc_pa); if (!adapter->tqd_start) { - printk(KERN_ERR "Failed to allocate memory for %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to allocate memory\n"); err = -ENOMEM; goto err_alloc_queue_desc; } @@ -2996,8 +2995,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (ver & 1) { VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); } else { - printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" - " %s\n", ver, pci_name(pdev)); + dev_err(&pdev->dev, + "Incompatible h/w version (0x%x) for adapter\n", ver); err = -EBUSY; goto err_ver; } @@ -3006,8 +3005,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (ver & 1) { VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); } else { - printk(KERN_ERR "Incompatible upt version (0x%x) for " - "adapter %s\n", ver, pci_name(pdev)); + dev_err(&pdev->dev, + "Incompatible upt version (0x%x) for adapter\n", ver); err = -EBUSY; goto err_ver; } @@ -3015,11 +3014,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); vmxnet3_declare_features(adapter, dma64); - adapter->dev_number = atomic_read(&devices_found); - - adapter->share_intr = irq_share_mode; - if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE && - adapter->num_tx_queues != adapter->num_rx_queues) + if (adapter->num_tx_queues == adapter->num_rx_queues) + adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; + else adapter->share_intr = VMXNET3_INTR_DONTSHARE; 
vmxnet3_alloc_intr_resources(adapter); @@ -3028,7 +3025,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (adapter->num_rx_queues > 1 && adapter->intr.type == VMXNET3_IT_MSIX) { adapter->rss = true; - printk(KERN_INFO "RSS is enabled.\n"); + netdev->hw_features |= NETIF_F_RXHASH; + netdev->features |= NETIF_F_RXHASH; + dev_dbg(&pdev->dev, "RSS is enabled.\n"); } else { adapter->rss = false; } @@ -3063,13 +3062,11 @@ vmxnet3_probe_device(struct pci_dev *pdev, err = register_netdev(netdev); if (err) { - printk(KERN_ERR "Failed to register adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to register adapter\n"); goto err_register; } vmxnet3_check_link(adapter, false); - atomic_inc(&devices_found); return 0; err_register: @@ -3311,7 +3308,7 @@ static struct pci_driver vmxnet3_driver = { static int __init vmxnet3_init_module(void) { - printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC, + pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC, VMXNET3_DRIVER_VERSION_REPORT); return pci_register_driver(&vmxnet3_driver); } diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 587a218b2345..9bc542be2937 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -207,7 +207,7 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - ETHTOOL_BUSINFO_LEN); + sizeof(drvinfo->bus_info)); drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); drvinfo->testinfo_len = 0; drvinfo->eedump_len = 0; @@ -522,24 +522,23 @@ vmxnet3_set_ringparam(struct net_device *netdev, if (err) { /* failed, most likely because of OOM, try default * size */ - printk(KERN_ERR "%s: failed to apply new sizes, try the" - " default ones\n", netdev->name); + netdev_err(netdev, "failed to apply new sizes, " + "try the default ones\n"); err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE); if (err) { - printk(KERN_ERR "%s: failed to create queues " - "with default sizes. Closing it\n", - netdev->name); + netdev_err(netdev, "failed to create queues " + "with default sizes. Closing it\n"); goto out; } } err = vmxnet3_activate_dev(adapter); if (err) - printk(KERN_ERR "%s: failed to re-activate, error %d." - " Closing it\n", netdev->name, err); + netdev_err(netdev, "failed to re-activate, error %d." 
+ " Closing it\n", err); } out: diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index fc46a81ad538..3198384689d9 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -276,8 +276,6 @@ struct vmxnet3_rx_queue { struct vmxnet3_rx_ctx rx_ctx; u32 qid; /* rqID in RCD for buffer from 1st ring */ u32 qid2; /* rqID in RCD for buffer from 2nd ring */ - u32 uncommitted[2]; /* # of buffers allocated since last RXPROD - * update */ struct vmxnet3_rx_buf_info *buf_info[2]; struct Vmxnet3_RxQueueCtrl *shared; struct vmxnet3_rq_driver_stats stats; @@ -354,7 +352,6 @@ struct vmxnet3_adapter { unsigned long state; /* VMXNET3_STATE_BIT_xxx */ - int dev_number; int share_intr; }; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 656230e0d18c..9d70421cf3a0 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -29,6 +29,7 @@ #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/hash.h> +#include <linux/ethtool.h> #include <net/arp.h> #include <net/ndisc.h> #include <net/ip.h> @@ -392,7 +393,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } /* Delete entry (via netlink) */ -static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev, +static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -1271,6 +1273,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } +static void vxlan_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver)); +} + +static const struct ethtool_ops vxlan_ethtool_ops = { + .get_drvinfo = vxlan_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + static int vxlan_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -1348,6 +1362,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, vxlan->port_max = ntohs(p->high); } + SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); + err = register_netdevice(dev); if (!err) hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni)); diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index d58431e99f73..0c077b0f7a2b 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -356,60 +356,6 @@ config SDLA To compile this driver as a module, choose M here: the module will be called sdla. -# Wan router core. -config WAN_ROUTER_DRIVERS - tristate "WAN router drivers" - depends on WAN_ROUTER - ---help--- - Connect LAN to WAN via Linux box. - - Select driver your card and remember to say Y to "Wan Router." - You will need the wan-tools package which is available from - <ftp://ftp.sangoma.com/>. - - Note that the answer to this question won't directly affect the - kernel except for how subordinate drivers may be built: - saying N will just cause the configurator to skip all - the questions about WAN router drivers. - - If unsure, say N. - -config CYCLADES_SYNC - tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)" - depends on WAN_ROUTER_DRIVERS && (PCI || ISA) - ---help--- - Cyclom 2X from Cyclades Corporation <http://www.avocent.com/> is an - intelligent multiprotocol WAN adapter with data transfer rates up to - 512 Kbps. These cards support the X.25 and SNA related protocols. 
- - While no documentation is available at this time please grab the - wanconfig tarball in - <http://www.conectiva.com.br/~acme/cycsyn-devel/> (with minor changes - to make it compile with the current wanrouter include files; efforts - are being made to use the original package available at - <ftp://ftp.sangoma.com/>). - - Feel free to contact me or the cycsyn-devel mailing list at - <acme@conectiva.com.br> and <cycsyn-devel@bazar.conectiva.com.br> for - additional details, I hope to have documentation available as soon as - possible. (Cyclades Brazil is writing the Documentation). - - The next questions will ask you about the protocols you want the - driver to support (for now only X.25 is supported). - - If you have one or more of these cards, say Y to this option. - - To compile this driver as a module, choose M here: the - module will be called cyclomx. - -config CYCLOMX_X25 - bool "Cyclom 2X X.25 support (EXPERIMENTAL)" - depends on CYCLADES_SYNC - help - Connect a Cyclom 2X card to an X.25 network. - - Enabling X.25 support will enlarge your kernel by about 11 kB. - # X.25 network drivers config LAPBETHER tristate "LAPB over Ethernet driver (EXPERIMENTAL)" diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index df70248e2fda..c135ef47cbca 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -5,10 +5,6 @@ # Rewritten to use lists instead of if-statements. # -cyclomx-y := cycx_main.o -cyclomx-$(CONFIG_CYCLOMX_X25) += cycx_x25.o -cyclomx-objs := $(cyclomx-y) - obj-$(CONFIG_HDLC) += hdlc.o obj-$(CONFIG_HDLC_RAW) += hdlc_raw.o obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o @@ -28,7 +24,6 @@ obj-$(CONFIG_LANMEDIA) += lmc/ obj-$(CONFIG_DLCI) += dlci.o obj-$(CONFIG_SDLA) += sdla.o -obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o obj-$(CONFIG_LAPBETHER) += lapbether.o obj-$(CONFIG_SBNI) += sbni.o obj-$(CONFIG_N2) += n2.o diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 6aed238e573e..0179cefae438 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -795,8 +795,8 @@ static ssize_t cosa_read(struct file *file, if (mutex_lock_interruptible(&chan->rlock)) return -ERESTARTSYS; - if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) { - pr_info("%s: cosa_read() - OOM\n", cosa->name); + chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL); + if (chan->rxdata == NULL) { mutex_unlock(&chan->rlock); return -ENOMEM; } @@ -874,9 +874,8 @@ static ssize_t cosa_write(struct file *file, count = COSA_MTU; /* Allocate the buffer */ - if ((kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA)) == NULL) { - pr_notice("%s: cosa_write() OOM - dropping packet\n", - cosa->name); + kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA); + if (kbuf == NULL) { up(&chan->wsem); return -ENOMEM; } diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c deleted file mode 100644 index 2a3ecae67a90..000000000000 --- a/drivers/net/wan/cycx_drv.c +++ /dev/null @@ -1,569 +0,0 @@ -/* -* cycx_drv.c Cyclom 2X Support Module. -* -* This module is a library of common hardware specific -* functions used by the Cyclades Cyclom 2X sync card. 
-* -* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> -* -* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo -* -* Based on sdladrv.c by Gene Kozin <genek@compuserve.com> -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* 1999/11/11 acme set_current_state(TASK_INTERRUPTIBLE), code -* cleanup -* 1999/11/08 acme init_cyc2x deleted, doing nothing -* 1999/11/06 acme back to read[bw], write[bw] and memcpy_to and -* fromio to use dpmbase ioremaped -* 1999/10/26 acme use isa_read[bw], isa_write[bw] & isa_memcpy_to -* & fromio -* 1999/10/23 acme cleanup to only supports cyclom2x: all the other -* boards are no longer manufactured by cyclades, -* if someone wants to support them... be my guest! -* 1999/05/28 acme cycx_intack & cycx_intde gone for good -* 1999/05/18 acme lots of unlogged work, submitting to Linus... -* 1999/01/03 acme more judicious use of data types -* 1999/01/03 acme judicious use of data types :> -* cycx_inten trying to reset pending interrupts -* from cyclom 2x - I think this isn't the way to -* go, but for now... -* 1999/01/02 acme cycx_intack ok, I think there's nothing to do -* to ack an int in cycx_drv.c, only handle it in -* cyx_isr (or in the other protocols: cyp_isr, -* cyf_isr, when they get implemented. -* Dec 31, 1998 acme cycx_data_boot & cycx_code_boot fixed, crossing -* fingers to see x25_configure in cycx_x25.c -* work... :) -* Dec 26, 1998 acme load implementation fixed, seems to work! :) -* cycx_2x_dpmbase_options with all the possible -* DPM addresses (20). -* cycx_intr implemented (test this!) -* general code cleanup -* Dec 8, 1998 Ivan Passos Cyclom-2X firmware load implementation. -* Aug 8, 1998 acme Initial version. -*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/init.h> /* __init */ -#include <linux/module.h> -#include <linux/kernel.h> /* printk(), and other useful stuff */ -#include <linux/stddef.h> /* offsetof(), etc. */ -#include <linux/errno.h> /* return codes */ -#include <linux/cycx_drv.h> /* API definitions */ -#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */ -#include <linux/delay.h> /* udelay, msleep_interruptible */ -#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */ - -#define MOD_VERSION 0 -#define MOD_RELEASE 6 - -MODULE_AUTHOR("Arnaldo Carvalho de Melo"); -MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver"); -MODULE_LICENSE("GPL"); - -/* Hardware-specific functions */ -static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len); -static void cycx_bootcfg(struct cycx_hw *hw); - -static int reset_cyc2x(void __iomem *addr); -static int detect_cyc2x(void __iomem *addr); - -/* Miscellaneous functions */ -static int get_option_index(const long *optlist, long optval); -static u16 checksum(u8 *buf, u32 len); - -#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET) - -/* Global Data */ - -/* private data */ -static const char fullname[] = "Cyclom 2X Support Module"; -static const char copyright[] = - "(c) 1998-2003 Arnaldo Carvalho de Melo <acme@conectiva.com.br>"; - -/* Hardware configuration options. - * These are arrays of configuration options used by verification routines. - * The first element of each array is its size (i.e. number of options). 
- */ -static const long cyc2x_dpmbase_options[] = { - 20, - 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000, - 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000, - 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000 -}; - -static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 }; - -/* Kernel Loadable Module Entry Points */ -/* Module 'insert' entry point. - * o print announcement - * o initialize static data - * - * Return: 0 Ok - * < 0 error. - * Context: process */ - -static int __init cycx_drv_init(void) -{ - pr_info("%s v%u.%u %s\n", - fullname, MOD_VERSION, MOD_RELEASE, copyright); - - return 0; -} - -/* Module 'remove' entry point. - * o release all remaining system resources */ -static void cycx_drv_cleanup(void) -{ -} - -/* Kernel APIs */ -/* Set up adapter. - * o detect adapter type - * o verify hardware configuration options - * o check for hardware conflicts - * o set up adapter shared memory - * o test adapter memory - * o load firmware - * Return: 0 ok. - * < 0 error */ -EXPORT_SYMBOL(cycx_setup); -int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase) -{ - int err; - - /* Verify IRQ configuration options */ - if (!get_option_index(cycx_2x_irq_options, hw->irq)) { - pr_err("IRQ %d is invalid!\n", hw->irq); - return -EINVAL; - } - - /* Setup adapter dual-port memory window and test memory */ - if (!dpmbase) { - pr_err("you must specify the dpm address!\n"); - return -EINVAL; - } else if (!get_option_index(cyc2x_dpmbase_options, dpmbase)) { - pr_err("memory address 0x%lX is invalid!\n", dpmbase); - return -EINVAL; - } - - hw->dpmbase = ioremap(dpmbase, CYCX_WINDOWSIZE); - hw->dpmsize = CYCX_WINDOWSIZE; - - if (!detect_cyc2x(hw->dpmbase)) { - pr_err("adapter Cyclom 2X not found at address 0x%lX!\n", - dpmbase); - return -EINVAL; - } - - pr_info("found Cyclom 2X card at address 0x%lX\n", dpmbase); - - /* Load firmware. If loader fails then shut down adapter */ - err = load_cyc2x(hw, cfm, len); - - if (err) - cycx_down(hw); /* shutdown adapter */ - - return err; -} - -EXPORT_SYMBOL(cycx_down); -int cycx_down(struct cycx_hw *hw) -{ - iounmap(hw->dpmbase); - return 0; -} - -/* Enable interrupt generation. */ -static void cycx_inten(struct cycx_hw *hw) -{ - writeb(0, hw->dpmbase); -} - -/* Generate an interrupt to adapter's CPU. */ -EXPORT_SYMBOL(cycx_intr); -void cycx_intr(struct cycx_hw *hw) -{ - writew(0, hw->dpmbase + GEN_CYCX_INTR); -} - -/* Execute Adapter Command. - * o Set exec flag. - * o Busy-wait until flag is reset. */ -EXPORT_SYMBOL(cycx_exec); -int cycx_exec(void __iomem *addr) -{ - u16 i = 0; - /* wait till addr content is zeroed */ - - while (readw(addr)) { - udelay(1000); - - if (++i > 50) - return -1; - } - - return 0; -} - -/* Read absolute adapter memory. - * Transfer data from adapter's memory to data buffer. */ -EXPORT_SYMBOL(cycx_peek); -int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len) -{ - if (len == 1) - *(u8*)buf = readb(hw->dpmbase + addr); - else - memcpy_fromio(buf, hw->dpmbase + addr, len); - - return 0; -} - -/* Write Absolute Adapter Memory. - * Transfer data from data buffer to adapter's memory. */ -EXPORT_SYMBOL(cycx_poke); -int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len) -{ - if (len == 1) - writeb(*(u8*)buf, hw->dpmbase + addr); - else - memcpy_toio(hw->dpmbase + addr, buf, len); - - return 0; -} - -/* Hardware-Specific Functions */ - -/* Load Aux Routines */ -/* Reset board hardware. - return 1 if memory exists at addr and 0 if not. 
*/ -static int memory_exists(void __iomem *addr) -{ - int tries = 0; - - for (; tries < 3 ; tries++) { - writew(TEST_PATTERN, addr + 0x10); - - if (readw(addr + 0x10) == TEST_PATTERN) - if (readw(addr + 0x10) == TEST_PATTERN) - return 1; - - msleep_interruptible(1 * 1000); - } - - return 0; -} - -/* Load reset code. */ -static void reset_load(void __iomem *addr, u8 *buffer, u32 cnt) -{ - void __iomem *pt_code = addr + RESET_OFFSET; - u16 i; /*, j; */ - - for (i = 0 ; i < cnt ; i++) { -/* for (j = 0 ; j < 50 ; j++); Delay - FIXME busy waiting... */ - writeb(*buffer++, pt_code++); - } -} - -/* Load buffer using boot interface. - * o copy data from buffer to Cyclom-X memory - * o wait for reset code to copy it to right portion of memory */ -static int buffer_load(void __iomem *addr, u8 *buffer, u32 cnt) -{ - memcpy_toio(addr + DATA_OFFSET, buffer, cnt); - writew(GEN_BOOT_DAT, addr + CMD_OFFSET); - - return wait_cyc(addr); -} - -/* Set up entry point and kick start Cyclom-X CPU. */ -static void cycx_start(void __iomem *addr) -{ - /* put in 0x30 offset the jump instruction to the code entry point */ - writeb(0xea, addr + 0x30); - writeb(0x00, addr + 0x31); - writeb(0xc4, addr + 0x32); - writeb(0x00, addr + 0x33); - writeb(0x00, addr + 0x34); - - /* cmd to start executing code */ - writew(GEN_START, addr + CMD_OFFSET); -} - -/* Load and boot reset code. */ -static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len) -{ - void __iomem *pt_start = addr + START_OFFSET; - - writeb(0xea, pt_start++); /* jmp to f000:3f00 */ - writeb(0x00, pt_start++); - writeb(0xfc, pt_start++); - writeb(0x00, pt_start++); - writeb(0xf0, pt_start); - reset_load(addr, code, len); - - /* 80186 was in hold, go */ - writeb(0, addr + START_CPU); - msleep_interruptible(1 * 1000); -} - -/* Load data.bin file through boot (reset) interface. */ -static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len) -{ - void __iomem *pt_boot_cmd = addr + CMD_OFFSET; - u32 i; - - /* boot buffer length */ - writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16)); - writew(GEN_DEFPAR, pt_boot_cmd); - - if (wait_cyc(addr) < 0) - return -1; - - writew(0, pt_boot_cmd + sizeof(u16)); - writew(0x4000, pt_boot_cmd + 2 * sizeof(u16)); - writew(GEN_SET_SEG, pt_boot_cmd); - - if (wait_cyc(addr) < 0) - return -1; - - for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ) - if (buffer_load(addr, code + i, - min_t(u32, CFM_LOAD_BUFSZ, (len - i))) < 0) { - pr_err("Error !!\n"); - return -1; - } - - return 0; -} - - -/* Load code.bin file through boot (reset) interface. */ -static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len) -{ - void __iomem *pt_boot_cmd = addr + CMD_OFFSET; - u32 i; - - /* boot buffer length */ - writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16)); - writew(GEN_DEFPAR, pt_boot_cmd); - - if (wait_cyc(addr) < 0) - return -1; - - writew(0x0000, pt_boot_cmd + sizeof(u16)); - writew(0xc400, pt_boot_cmd + 2 * sizeof(u16)); - writew(GEN_SET_SEG, pt_boot_cmd); - - if (wait_cyc(addr) < 0) - return -1; - - for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ) - if (buffer_load(addr, code + i, - min_t(u32, CFM_LOAD_BUFSZ, (len - i)))) { - pr_err("Error !!\n"); - return -1; - } - - return 0; -} - -/* Load adapter from the memory image of the CYCX firmware module. 
- * o verify firmware integrity and compatibility - * o start adapter up */ -static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len) -{ - int i, j; - struct cycx_fw_header *img_hdr; - u8 *reset_image, - *data_image, - *code_image; - void __iomem *pt_cycld = hw->dpmbase + 0x400; - u16 cksum; - - /* Announce */ - pr_info("firmware signature=\"%s\"\n", cfm->signature); - - /* Verify firmware signature */ - if (strcmp(cfm->signature, CFM_SIGNATURE)) { - pr_err("load_cyc2x: not Cyclom-2X firmware!\n"); - return -EINVAL; - } - - pr_info("firmware version=%u\n", cfm->version); - - /* Verify firmware module format version */ - if (cfm->version != CFM_VERSION) { - pr_err("%s: firmware format %u rejected! Expecting %u.\n", - __func__, cfm->version, CFM_VERSION); - return -EINVAL; - } - - /* Verify firmware module length and checksum */ - cksum = checksum((u8*)&cfm->info, sizeof(struct cycx_fw_info) + - cfm->info.codesize); -/* - FIXME cfm->info.codesize is off by 2 - if (((len - sizeof(struct cycx_firmware) - 1) != cfm->info.codesize) || -*/ - if (cksum != cfm->checksum) { - pr_err("%s: firmware corrupted!\n", __func__); - pr_err(" cdsize = 0x%x (expected 0x%lx)\n", - len - (int)sizeof(struct cycx_firmware) - 1, - cfm->info.codesize); - pr_err(" chksum = 0x%x (expected 0x%x)\n", - cksum, cfm->checksum); - return -EINVAL; - } - - /* If everything is ok, set reset, data and code pointers */ - img_hdr = (struct cycx_fw_header *)&cfm->image; -#ifdef FIRMWARE_DEBUG - pr_info("%s: image sizes\n", __func__); - pr_info(" reset=%lu\n", img_hdr->reset_size); - pr_info(" data=%lu\n", img_hdr->data_size); - pr_info(" code=%lu\n", img_hdr->code_size); -#endif - reset_image = ((u8 *)img_hdr) + sizeof(struct cycx_fw_header); - data_image = reset_image + img_hdr->reset_size; - code_image = data_image + img_hdr->data_size; - - /*---- Start load ----*/ - /* Announce */ - pr_info("loading firmware %s (ID=%u)...\n", - cfm->descr[0] ? cfm->descr : "unknown firmware", - cfm->info.codeid); - - for (i = 0 ; i < 5 ; i++) { - /* Reset Cyclom hardware */ - if (!reset_cyc2x(hw->dpmbase)) { - pr_err("dpm problem or board not found\n"); - return -EINVAL; - } - - /* Load reset.bin */ - cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size); - /* reset is waiting for boot */ - writew(GEN_POWER_ON, pt_cycld); - msleep_interruptible(1 * 1000); - - for (j = 0 ; j < 3 ; j++) - if (!readw(pt_cycld)) - goto reset_loaded; - else - msleep_interruptible(1 * 1000); - } - - pr_err("reset not started\n"); - return -EINVAL; - -reset_loaded: - /* Load data.bin */ - if (cycx_data_boot(hw->dpmbase, data_image, img_hdr->data_size)) { - pr_err("cannot load data file\n"); - return -EINVAL; - } - - /* Load code.bin */ - if (cycx_code_boot(hw->dpmbase, code_image, img_hdr->code_size)) { - pr_err("cannot load code file\n"); - return -EINVAL; - } - - /* Prepare boot-time configuration data */ - cycx_bootcfg(hw); - - /* kick-off CPU */ - cycx_start(hw->dpmbase); - - /* Arthur Ganzert's tip: wait a while after the firmware loading... - seg abr 26 17:17:12 EST 1999 - acme */ - msleep_interruptible(7 * 1000); - pr_info("firmware loaded!\n"); - - /* enable interrupts */ - cycx_inten(hw); - - return 0; -} - -/* Prepare boot-time firmware configuration data. - * o initialize configuration data area - From async.doc - V_3.4.0 - 07/18/1994 - - As of now, only static buffers are available to the user. - So, the bit VD_RXDIRC must be set in 'valid'. That means that user - wants to use the static transmission and reception buffers. 
*/ -static void cycx_bootcfg(struct cycx_hw *hw) -{ - /* use fixed buffers */ - writeb(FIXED_BUFFERS, hw->dpmbase + CONF_OFFSET); -} - -/* Detect Cyclom 2x adapter. - * Following tests are used to detect Cyclom 2x adapter: - * to be completed based on the tests done below - * Return 1 if detected o.k. or 0 if failed. - * Note: This test is destructive! Adapter will be left in shutdown - * state after the test. */ -static int detect_cyc2x(void __iomem *addr) -{ - reset_cyc2x(addr); - - return memory_exists(addr); -} - -/* Miscellaneous */ -/* Get option's index into the options list. - * Return option's index (1 .. N) or zero if option is invalid. */ -static int get_option_index(const long *optlist, long optval) -{ - int i = 1; - - for (; i <= optlist[0]; ++i) - if (optlist[i] == optval) - return i; - - return 0; -} - -/* Reset adapter's CPU. */ -static int reset_cyc2x(void __iomem *addr) -{ - writeb(0, addr + RST_ENABLE); - msleep_interruptible(2 * 1000); - writeb(0, addr + RST_DISABLE); - msleep_interruptible(2 * 1000); - - return memory_exists(addr); -} - -/* Calculate 16-bit CRC using CCITT polynomial. */ -static u16 checksum(u8 *buf, u32 len) -{ - u16 crc = 0; - u16 mask, flag; - - for (; len; --len, ++buf) - for (mask = 0x80; mask; mask >>= 1) { - flag = (crc & 0x8000); - crc <<= 1; - crc |= ((*buf & mask) ? 1 : 0); - - if (flag) - crc ^= 0x1021; - } - - return crc; -} - -module_init(cycx_drv_init); -module_exit(cycx_drv_cleanup); - -/* End */ diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c deleted file mode 100644 index 81fbbad406be..000000000000 --- a/drivers/net/wan/cycx_main.c +++ /dev/null @@ -1,346 +0,0 @@ -/* -* cycx_main.c Cyclades Cyclom 2X WAN Link Driver. Main module. -* -* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> -* -* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo -* -* Based on sdlamain.c by Gene Kozin <genek@compuserve.com> & -* Jaspreet Singh <jaspreet@sangoma.com> -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* Please look at the bitkeeper changelog (or any other scm tool that ends up -* importing bitkeeper changelog or that replaces bitkeeper in the future as -* main tool for linux development). -* -* 2001/05/09 acme Fix MODULE_DESC for debug, .bss nitpicks, -* some cleanups -* 2000/07/13 acme remove useless #ifdef MODULE and crap -* #if KERNEL_VERSION > blah -* 2000/07/06 acme __exit at cyclomx_cleanup -* 2000/04/02 acme dprintk and cycx_debug -* module_init/module_exit -* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count -* and cyclomx_close to cyclomx_mod_dec_use_count -* 2000/01/08 acme cleanup -* 1999/11/06 acme cycx_down back to life (it needs to be -* called to iounmap the dpmbase) -* 1999/08/09 acme removed references to enable_tx_int -* use spinlocks instead of cli/sti in -* cyclomx_set_state -* 1999/05/19 acme works directly linked into the kernel -* init_waitqueue_head for 2.3.* kernel -* 1999/05/18 acme major cleanup (polling not needed), etc -* 1998/08/28 acme minor cleanup (ioctls for firmware deleted) -* queue_task activated -* 1998/08/08 acme Initial version. -*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/stddef.h> /* offsetof(), etc. 
*/ -#include <linux/errno.h> /* return codes */ -#include <linux/string.h> /* inline memset(), etc. */ -#include <linux/slab.h> /* kmalloc(), kfree() */ -#include <linux/kernel.h> /* printk(), and other useful stuff */ -#include <linux/module.h> /* support for loadable modules */ -#include <linux/ioport.h> /* request_region(), release_region() */ -#include <linux/wanrouter.h> /* WAN router definitions */ -#include <linux/cyclomx.h> /* cyclomx common user API definitions */ -#include <linux/init.h> /* __init (when not using as a module) */ -#include <linux/interrupt.h> - -unsigned int cycx_debug; - -MODULE_AUTHOR("Arnaldo Carvalho de Melo"); -MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver."); -MODULE_LICENSE("GPL"); -module_param(cycx_debug, int, 0); -MODULE_PARM_DESC(cycx_debug, "cyclomx debug level"); - -/* Defines & Macros */ - -#define CYCX_DRV_VERSION 0 /* version number */ -#define CYCX_DRV_RELEASE 11 /* release (minor version) number */ -#define CYCX_MAX_CARDS 1 /* max number of adapters */ - -#define CONFIG_CYCX_CARDS 1 - -/* Function Prototypes */ - -/* WAN link driver entry points */ -static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf); -static int cycx_wan_shutdown(struct wan_device *wandev); - -/* Miscellaneous functions */ -static irqreturn_t cycx_isr(int irq, void *dev_id); - -/* Global Data - * Note: All data must be explicitly initialized!!! - */ - -/* private data */ -static const char cycx_drvname[] = "cyclomx"; -static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver"; -static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo " - "<acme@conectiva.com.br>"; -static int cycx_ncards = CONFIG_CYCX_CARDS; -static struct cycx_device *cycx_card_array; /* adapter data space */ - -/* Kernel Loadable Module Entry Points */ - -/* - * Module 'insert' entry point. - * o print announcement - * o allocate adapter data space - * o initialize static data - * o register all cards with WAN router - * o calibrate Cyclom 2X shared memory access delay. - * - * Return: 0 Ok - * < 0 error. - * Context: process - */ -static int __init cycx_init(void) -{ - int cnt, err = -ENOMEM; - - pr_info("%s v%u.%u %s\n", - cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE, - cycx_copyright); - - /* Verify number of cards and allocate adapter data space */ - cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS); - cycx_ncards = max_t(int, cycx_ncards, 1); - cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL); - if (!cycx_card_array) - goto out; - - - /* Register adapters with WAN router */ - for (cnt = 0; cnt < cycx_ncards; ++cnt) { - struct cycx_device *card = &cycx_card_array[cnt]; - struct wan_device *wandev = &card->wandev; - - sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1); - wandev->magic = ROUTER_MAGIC; - wandev->name = card->devname; - wandev->private = card; - wandev->setup = cycx_wan_setup; - wandev->shutdown = cycx_wan_shutdown; - err = register_wan_device(wandev); - - if (err) { - pr_err("%s registration failed with error %d!\n", - card->devname, err); - break; - } - } - - err = -ENODEV; - if (!cnt) { - kfree(cycx_card_array); - goto out; - } - err = 0; - cycx_ncards = cnt; /* adjust actual number of cards */ -out: return err; -} - -/* - * Module 'remove' entry point. 
- * o unregister all adapters from the WAN router - * o release all remaining system resources - */ -static void __exit cycx_exit(void) -{ - int i = 0; - - for (; i < cycx_ncards; ++i) { - struct cycx_device *card = &cycx_card_array[i]; - unregister_wan_device(card->devname); - } - - kfree(cycx_card_array); -} - -/* WAN Device Driver Entry Points */ -/* - * Setup/configure WAN link driver. - * o check adapter state - * o make sure firmware is present in configuration - * o allocate interrupt vector - * o setup Cyclom 2X hardware - * o call appropriate routine to perform protocol-specific initialization - * - * This function is called when router handles ROUTER_SETUP IOCTL. The - * configuration structure is in kernel memory (including extended data, if - * any). - */ -static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf) -{ - int rc = -EFAULT; - struct cycx_device *card; - int irq; - - /* Sanity checks */ - - if (!wandev || !wandev->private || !conf) - goto out; - - card = wandev->private; - rc = -EBUSY; - if (wandev->state != WAN_UNCONFIGURED) - goto out; - - rc = -EINVAL; - if (!conf->data_size || !conf->data) { - pr_err("%s: firmware not found in configuration data!\n", - wandev->name); - goto out; - } - - if (conf->irq <= 0) { - pr_err("%s: can't configure without IRQ!\n", wandev->name); - goto out; - } - - /* Allocate IRQ */ - irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */ - - if (request_irq(irq, cycx_isr, 0, wandev->name, card)) { - pr_err("%s: can't reserve IRQ %d!\n", wandev->name, irq); - goto out; - } - - /* Configure hardware, load firmware, etc. */ - memset(&card->hw, 0, sizeof(card->hw)); - card->hw.irq = irq; - card->hw.dpmsize = CYCX_WINDOWSIZE; - card->hw.fwid = CFID_X25_2X; - spin_lock_init(&card->lock); - init_waitqueue_head(&card->wait_stats); - - rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr); - if (rc) - goto out_irq; - - /* Initialize WAN device data space */ - wandev->irq = irq; - wandev->dma = wandev->ioport = 0; - wandev->maddr = (unsigned long)card->hw.dpmbase; - wandev->msize = card->hw.dpmsize; - wandev->hw_opt[2] = 0; - wandev->hw_opt[3] = card->hw.fwid; - - /* Protocol-specific initialization */ - switch (card->hw.fwid) { -#ifdef CONFIG_CYCLOMX_X25 - case CFID_X25_2X: - rc = cycx_x25_wan_init(card, conf); - break; -#endif - default: - pr_err("%s: this firmware is not supported!\n", wandev->name); - rc = -EINVAL; - } - - if (rc) { - cycx_down(&card->hw); - goto out_irq; - } - - rc = 0; -out: - return rc; -out_irq: - free_irq(irq, card); - goto out; -} - -/* - * Shut down WAN link driver. - * o shut down adapter hardware - * o release system resources. - * - * This function is called by the router when device is being unregistered or - * when it handles ROUTER_DOWN IOCTL. - */ -static int cycx_wan_shutdown(struct wan_device *wandev) -{ - int ret = -EFAULT; - struct cycx_device *card; - - /* sanity checks */ - if (!wandev || !wandev->private) - goto out; - - ret = 0; - if (wandev->state == WAN_UNCONFIGURED) - goto out; - - card = wandev->private; - wandev->state = WAN_UNCONFIGURED; - cycx_down(&card->hw); - pr_info("%s: irq %d being freed!\n", wandev->name, wandev->irq); - free_irq(wandev->irq, card); -out: return ret; -} - -/* Miscellaneous */ -/* - * Cyclom 2X Interrupt Service Routine. - * o acknowledge Cyclom 2X hardware interrupt. - * o call protocol-specific interrupt service routine, if any. 
- */ -static irqreturn_t cycx_isr(int irq, void *dev_id) -{ - struct cycx_device *card = dev_id; - - if (card->wandev.state == WAN_UNCONFIGURED) - goto out; - - if (card->in_isr) { - pr_warn("%s: interrupt re-entrancy on IRQ %d!\n", - card->devname, card->wandev.irq); - goto out; - } - - if (card->isr) - card->isr(card); - return IRQ_HANDLED; -out: - return IRQ_NONE; -} - -/* Set WAN device state. */ -void cycx_set_state(struct cycx_device *card, int state) -{ - unsigned long flags; - char *string_state = NULL; - - spin_lock_irqsave(&card->lock, flags); - - if (card->wandev.state != state) { - switch (state) { - case WAN_CONNECTED: - string_state = "connected!"; - break; - case WAN_DISCONNECTED: - string_state = "disconnected!"; - break; - } - pr_info("%s: link %s\n", card->devname, string_state); - card->wandev.state = state; - } - - card->state_tick = jiffies; - spin_unlock_irqrestore(&card->lock, flags); -} - -module_init(cycx_init); -module_exit(cycx_exit); diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c deleted file mode 100644 index 06f3f6309e4b..000000000000 --- a/drivers/net/wan/cycx_x25.c +++ /dev/null @@ -1,1602 +0,0 @@ -/* -* cycx_x25.c Cyclom 2X WAN Link Driver. X.25 module. -* -* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> -* -* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo -* -* Based on sdla_x25.c by Gene Kozin <genek@compuserve.com> -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* 2001/01/12 acme use dev_kfree_skb_irq on interrupt context -* 2000/04/02 acme dprintk, cycx_debug -* fixed the bug introduced in get_dev_by_lcn and -* get_dev_by_dte_addr by the anonymous hacker -* that converted this driver to softnet -* 2000/01/08 acme cleanup -* 1999/10/27 acme use ARPHRD_HWX25 so that the X.25 stack know -* that we have a X.25 stack implemented in -* firmware onboard -* 1999/10/18 acme support for X.25 sockets in if_send, -* beware: socket(AF_X25...) IS WORK IN PROGRESS, -* TCP/IP over X.25 via wanrouter not affected, -* working. -* 1999/10/09 acme chan_disc renamed to chan_disconnect, -* began adding support for X.25 sockets: -* conf->protocol in new_if -* 1999/10/05 acme fixed return E... to return -E... -* 1999/08/10 acme serialized access to the card thru a spinlock -* in x25_exec -* 1999/08/09 acme removed per channel spinlocks -* removed references to enable_tx_int -* 1999/05/28 acme fixed nibble_to_byte, ackvc now properly treated -* if_send simplified -* 1999/05/25 acme fixed t1, t2, t21 & t23 configuration -* use spinlocks instead of cli/sti in some points -* 1999/05/24 acme finished the x25_get_stat function -* 1999/05/23 acme dev->type = ARPHRD_X25 (tcpdump only works, -* AFAIT, with ARPHRD_ETHER). This seems to be -* needed to use socket(AF_X25)... -* Now the config file must specify a peer media -* address for svc channels over a crossover cable. -* Removed hold_timeout from x25_channel_t, -* not used. -* A little enhancement in the DEBUG processing -* 1999/05/22 acme go to DISCONNECTED in disconnect_confirm_intr, -* instead of chan_disc. -* 1999/05/16 marcelo fixed timer initialization in SVCs -* 1999/01/05 acme x25_configure now get (most of) all -* parameters... 
-* 1999/01/05 acme pktlen now (correctly) uses log2 (value -* configured) -* 1999/01/03 acme judicious use of data types (u8, u16, u32, etc) -* 1999/01/03 acme cyx_isr: reset dpmbase to acknowledge -* indication (interrupt from cyclom 2x) -* 1999/01/02 acme cyx_isr: first hackings... -* 1999/01/0203 acme when initializing an array don't give less -* elements than declared... -* example: char send_cmd[6] = "?\xFF\x10"; -* you'll gonna lose a couple hours, 'cause your -* brain won't admit that there's an error in the -* above declaration... the side effect is that -* memset is put into the unresolved symbols -* instead of using the inline memset functions... -* 1999/01/02 acme began chan_connect, chan_send, x25_send -* 1998/12/31 acme x25_configure -* this code can be compiled as non module -* 1998/12/27 acme code cleanup -* IPX code wiped out! let's decrease code -* complexity for now, remember: I'm learning! :) -* bps_to_speed_code OK -* 1998/12/26 acme Minimal debug code cleanup -* 1998/08/08 acme Initial version. -*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#define CYCLOMX_X25_DEBUG 1 - -#include <linux/ctype.h> /* isdigit() */ -#include <linux/errno.h> /* return codes */ -#include <linux/if_arp.h> /* ARPHRD_HWX25 */ -#include <linux/kernel.h> /* printk(), and other useful stuff */ -#include <linux/module.h> -#include <linux/string.h> /* inline memset(), etc. */ -#include <linux/sched.h> -#include <linux/slab.h> /* kmalloc(), kfree() */ -#include <linux/stddef.h> /* offsetof(), etc. */ -#include <linux/wanrouter.h> /* WAN router definitions */ - -#include <asm/byteorder.h> /* htons(), etc. */ - -#include <linux/cyclomx.h> /* Cyclom 2X common user API definitions */ -#include <linux/cycx_x25.h> /* X.25 firmware API definitions */ - -#include <net/x25device.h> - -/* Defines & Macros */ -#define CYCX_X25_MAX_CMD_RETRY 5 -#define CYCX_X25_CHAN_MTU 2048 /* unfragmented logical channel MTU */ - -/* Data Structures */ -/* This is an extension of the 'struct net_device' we create for each network - interface to keep the rest of X.25 channel-specific data. */ -struct cycx_x25_channel { - /* This member must be first. */ - struct net_device *slave; /* WAN slave */ - - char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */ - char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */ - char *local_addr; /* local media address, ASCIIZ - - svc thru crossover cable */ - s16 lcn; /* logical channel number/conn.req.key*/ - u8 link; - struct timer_list timer; /* timer used for svc channel disc. */ - u16 protocol; /* ethertype, 0 - multiplexed */ - u8 svc; /* 0 - permanent, 1 - switched */ - u8 state; /* channel state */ - u8 drop_sequence; /* mark sequence for dropping */ - u32 idle_tmout; /* sec, before disconnecting */ - struct sk_buff *rx_skb; /* receive socket buffer */ - struct cycx_device *card; /* -> owner */ - struct net_device_stats ifstats;/* interface statistics */ -}; - -/* Function Prototypes */ -/* WAN link driver entry points. These are called by the WAN router module. 
*/ -static int cycx_wan_update(struct wan_device *wandev), - cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev, - wanif_conf_t *conf), - cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev); - -/* Network device interface */ -static int cycx_netdevice_init(struct net_device *dev); -static int cycx_netdevice_open(struct net_device *dev); -static int cycx_netdevice_stop(struct net_device *dev); -static int cycx_netdevice_hard_header(struct sk_buff *skb, - struct net_device *dev, u16 type, - const void *daddr, const void *saddr, - unsigned len); -static int cycx_netdevice_rebuild_header(struct sk_buff *skb); -static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev); - -static struct net_device_stats * - cycx_netdevice_get_stats(struct net_device *dev); - -/* Interrupt handlers */ -static void cycx_x25_irq_handler(struct cycx_device *card), - cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd), - cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd), - cycx_x25_irq_log(struct cycx_device *card, - struct cycx_x25_cmd *cmd), - cycx_x25_irq_stat(struct cycx_device *card, - struct cycx_x25_cmd *cmd), - cycx_x25_irq_connect_confirm(struct cycx_device *card, - struct cycx_x25_cmd *cmd), - cycx_x25_irq_disconnect_confirm(struct cycx_device *card, - struct cycx_x25_cmd *cmd), - cycx_x25_irq_connect(struct cycx_device *card, - struct cycx_x25_cmd *cmd), - cycx_x25_irq_disconnect(struct cycx_device *card, - struct cycx_x25_cmd *cmd), - cycx_x25_irq_spurious(struct cycx_device *card, - struct cycx_x25_cmd *cmd); - -/* X.25 firmware interface functions */ -static int cycx_x25_configure(struct cycx_device *card, - struct cycx_x25_config *conf), - cycx_x25_get_stats(struct cycx_device *card), - cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm, - int len, void *buf), - cycx_x25_connect_response(struct cycx_device *card, - struct cycx_x25_channel *chan), - cycx_x25_disconnect_response(struct cycx_device *card, u8 link, - u8 lcn); - -/* channel functions */ -static int cycx_x25_chan_connect(struct net_device *dev), - cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb); - -static void cycx_x25_chan_disconnect(struct net_device *dev), - cycx_x25_chan_send_event(struct net_device *dev, u8 event); - -/* Miscellaneous functions */ -static void cycx_x25_set_chan_state(struct net_device *dev, u8 state), - cycx_x25_chan_timer(unsigned long d); - -static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble), - reset_timer(struct net_device *dev); - -static u8 bps_to_speed_code(u32 bps); -static u8 cycx_log2(u32 n); - -static unsigned dec_to_uint(u8 *str, int len); - -static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev, - s16 lcn); -static struct net_device * - cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte); - -static void cycx_x25_chan_setup(struct net_device *dev); - -#ifdef CYCLOMX_X25_DEBUG -static void hex_dump(char *msg, unsigned char *p, int len); -static void cycx_x25_dump_config(struct cycx_x25_config *conf); -static void cycx_x25_dump_stats(struct cycx_x25_stats *stats); -static void cycx_x25_dump_devs(struct wan_device *wandev); -#else -#define hex_dump(msg, p, len) -#define cycx_x25_dump_config(conf) -#define cycx_x25_dump_stats(stats) -#define cycx_x25_dump_devs(wandev) -#endif -/* Public Functions */ - -/* X.25 Protocol Initialization routine. - * - * This routine is called by the main Cyclom 2X module during setup. 
At this - * point adapter is completely initialized and X.25 firmware is running. - * o configure adapter - * o initialize protocol-specific fields of the adapter data space. - * - * Return: 0 o.k. - * < 0 failure. */ -int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf) -{ - struct cycx_x25_config cfg; - - /* Verify configuration ID */ - if (conf->config_id != WANCONFIG_X25) { - pr_info("%s: invalid configuration ID %u!\n", - card->devname, conf->config_id); - return -EINVAL; - } - - /* Initialize protocol-specific fields */ - card->mbox = card->hw.dpmbase + X25_MBOX_OFFS; - card->u.x.connection_keys = 0; - spin_lock_init(&card->u.x.lock); - - /* Configure adapter. Here we set reasonable defaults, then parse - * device configuration structure and set configuration options. - * Most configuration options are verified and corrected (if - * necessary) since we can't rely on the adapter to do so and don't - * want it to fail either. */ - memset(&cfg, 0, sizeof(cfg)); - cfg.link = 0; - cfg.clock = conf->clocking == WANOPT_EXTERNAL ? 8 : 55; - cfg.speed = bps_to_speed_code(conf->bps); - cfg.n3win = 7; - cfg.n2win = 2; - cfg.n2 = 5; - cfg.nvc = 1; - cfg.npvc = 1; - cfg.flags = 0x02; /* default = V35 */ - cfg.t1 = 10; /* line carrier timeout */ - cfg.t2 = 29; /* tx timeout */ - cfg.t21 = 180; /* CALL timeout */ - cfg.t23 = 180; /* CLEAR timeout */ - - /* adjust MTU */ - if (!conf->mtu || conf->mtu >= 512) - card->wandev.mtu = 512; - else if (conf->mtu >= 256) - card->wandev.mtu = 256; - else if (conf->mtu >= 128) - card->wandev.mtu = 128; - else - card->wandev.mtu = 64; - - cfg.pktlen = cycx_log2(card->wandev.mtu); - - if (conf->station == WANOPT_DTE) { - cfg.locaddr = 3; /* DTE */ - cfg.remaddr = 1; /* DCE */ - } else { - cfg.locaddr = 1; /* DCE */ - cfg.remaddr = 3; /* DTE */ - } - - if (conf->interface == WANOPT_RS232) - cfg.flags = 0; /* FIXME just reset the 2nd bit */ - - if (conf->u.x25.hi_pvc) { - card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, 4095); - card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc); - } - - if (conf->u.x25.hi_svc) { - card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, 4095); - card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc); - } - - if (card->u.x.lo_pvc == 255) - cfg.npvc = 0; - else - cfg.npvc = card->u.x.hi_pvc - card->u.x.lo_pvc + 1; - - cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc; - - if (conf->u.x25.hdlc_window) - cfg.n2win = min_t(unsigned int, conf->u.x25.hdlc_window, 7); - - if (conf->u.x25.pkt_window) - cfg.n3win = min_t(unsigned int, conf->u.x25.pkt_window, 7); - - if (conf->u.x25.t1) - cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30); - - if (conf->u.x25.t2) - cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 30); - - if (conf->u.x25.t11_t21) - cfg.t21 = min_t(unsigned int, conf->u.x25.t11_t21, 30); - - if (conf->u.x25.t13_t23) - cfg.t23 = min_t(unsigned int, conf->u.x25.t13_t23, 30); - - if (conf->u.x25.n2) - cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30); - - /* initialize adapter */ - if (cycx_x25_configure(card, &cfg)) - return -EIO; - - /* Initialize protocol-specific fields of adapter data space */ - card->wandev.bps = conf->bps; - card->wandev.interface = conf->interface; - card->wandev.clocking = conf->clocking; - card->wandev.station = conf->station; - card->isr = cycx_x25_irq_handler; - card->exec = NULL; - card->wandev.update = cycx_wan_update; - card->wandev.new_if = cycx_wan_new_if; - card->wandev.del_if = cycx_wan_del_if; - 
card->wandev.state = WAN_DISCONNECTED; - - return 0; -} - -/* WAN Device Driver Entry Points */ -/* Update device status & statistics. */ -static int cycx_wan_update(struct wan_device *wandev) -{ - /* sanity checks */ - if (!wandev || !wandev->private) - return -EFAULT; - - if (wandev->state == WAN_UNCONFIGURED) - return -ENODEV; - - cycx_x25_get_stats(wandev->private); - - return 0; -} - -/* Create new logical channel. - * This routine is called by the router when ROUTER_IFNEW IOCTL is being - * handled. - * o parse media- and hardware-specific configuration - * o make sure that a new channel can be created - * o allocate resources, if necessary - * o prepare network device structure for registration. - * - * Return: 0 o.k. - * < 0 failure (channel will not be created) */ -static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev, - wanif_conf_t *conf) -{ - struct cycx_device *card = wandev->private; - struct cycx_x25_channel *chan; - int err = 0; - - if (!conf->name[0] || strlen(conf->name) > WAN_IFNAME_SZ) { - pr_info("%s: invalid interface name!\n", card->devname); - return -EINVAL; - } - - dev = alloc_netdev(sizeof(struct cycx_x25_channel), conf->name, - cycx_x25_chan_setup); - if (!dev) - return -ENOMEM; - - chan = netdev_priv(dev); - strcpy(chan->name, conf->name); - chan->card = card; - chan->link = conf->port; - chan->protocol = conf->protocol ? ETH_P_X25 : ETH_P_IP; - chan->rx_skb = NULL; - /* only used in svc connected thru crossover cable */ - chan->local_addr = NULL; - - if (conf->addr[0] == '@') { /* SVC */ - int len = strlen(conf->local_addr); - - if (len) { - if (len > WAN_ADDRESS_SZ) { - pr_err("%s: %s local addr too long!\n", - wandev->name, chan->name); - err = -EINVAL; - goto error; - } else { - chan->local_addr = kmalloc(len + 1, GFP_KERNEL); - - if (!chan->local_addr) { - err = -ENOMEM; - goto error; - } - } - - strncpy(chan->local_addr, conf->local_addr, - WAN_ADDRESS_SZ); - } - - chan->svc = 1; - strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ); - init_timer(&chan->timer); - chan->timer.function = cycx_x25_chan_timer; - chan->timer.data = (unsigned long)dev; - - /* Set channel timeouts (default if not specified) */ - chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90; - } else if (isdigit(conf->addr[0])) { /* PVC */ - s16 lcn = dec_to_uint(conf->addr, 0); - - if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc) - chan->lcn = lcn; - else { - pr_err("%s: PVC %u is out of range on interface %s!\n", - wandev->name, lcn, chan->name); - err = -EINVAL; - goto error; - } - } else { - pr_err("%s: invalid media address on interface %s!\n", - wandev->name, chan->name); - err = -EINVAL; - goto error; - } - - return 0; - -error: - free_netdev(dev); - return err; -} - -/* Delete logical channel. 
*/ -static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - - if (chan->svc) { - kfree(chan->local_addr); - if (chan->state == WAN_CONNECTED) - del_timer(&chan->timer); - } - - return 0; -} - - -/* Network Device Interface */ - -static const struct header_ops cycx_header_ops = { - .create = cycx_netdevice_hard_header, - .rebuild = cycx_netdevice_rebuild_header, -}; - -static const struct net_device_ops cycx_netdev_ops = { - .ndo_init = cycx_netdevice_init, - .ndo_open = cycx_netdevice_open, - .ndo_stop = cycx_netdevice_stop, - .ndo_start_xmit = cycx_netdevice_hard_start_xmit, - .ndo_get_stats = cycx_netdevice_get_stats, -}; - -static void cycx_x25_chan_setup(struct net_device *dev) -{ - /* Initialize device driver entry points */ - dev->netdev_ops = &cycx_netdev_ops; - dev->header_ops = &cycx_header_ops; - - /* Initialize media-specific parameters */ - dev->mtu = CYCX_X25_CHAN_MTU; - dev->type = ARPHRD_HWX25; /* ARP h/w type */ - dev->hard_header_len = 0; /* media header length */ - dev->addr_len = 0; /* hardware address length */ -} - -/* Initialize Linux network interface. - * - * This routine is called only once for each interface, during Linux network - * interface registration. Returning anything but zero will fail interface - * registration. */ -static int cycx_netdevice_init(struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - struct cycx_device *card = chan->card; - struct wan_device *wandev = &card->wandev; - - if (!chan->svc) - *(__be16*)dev->dev_addr = htons(chan->lcn); - - /* Initialize hardware parameters (just for reference) */ - dev->irq = wandev->irq; - dev->dma = wandev->dma; - dev->base_addr = wandev->ioport; - dev->mem_start = (unsigned long)wandev->maddr; - dev->mem_end = (unsigned long)(wandev->maddr + - wandev->msize - 1); - dev->flags |= IFF_NOARP; - - /* Set transmit buffer queue length */ - dev->tx_queue_len = 10; - - /* Initialize socket buffers */ - cycx_x25_set_chan_state(dev, WAN_DISCONNECTED); - - return 0; -} - -/* Open network interface. - * o prevent module from unloading by incrementing use count - * o if link is disconnected then initiate connection - * - * Return 0 if O.k. or errno. */ -static int cycx_netdevice_open(struct net_device *dev) -{ - if (netif_running(dev)) - return -EBUSY; /* only one open is allowed */ - - netif_start_queue(dev); - return 0; -} - -/* Close network interface. - * o reset flags. - * o if there's no more open channels then disconnect physical link. */ -static int cycx_netdevice_stop(struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - - netif_stop_queue(dev); - - if (chan->state == WAN_CONNECTED || chan->state == WAN_CONNECTING) - cycx_x25_chan_disconnect(dev); - - return 0; -} - -/* Build media header. - * o encapsulate packet according to encapsulation type. - * - * The trick here is to put packet type (Ethertype) into 'protocol' field of - * the socket buffer, so that we don't forget it. If encapsulation fails, - * set skb->protocol to 0 and discard packet later. - * - * Return: media header length. */ -static int cycx_netdevice_hard_header(struct sk_buff *skb, - struct net_device *dev, u16 type, - const void *daddr, const void *saddr, - unsigned len) -{ - skb->protocol = htons(type); - - return dev->hard_header_len; -} - -/* * Re-build media header. - * Return: 1 physical address resolved. 
- * 0 physical address not resolved */ -static int cycx_netdevice_rebuild_header(struct sk_buff *skb) -{ - return 1; -} - -/* Send a packet on a network interface. - * o set busy flag (marks start of the transmission). - * o check link state. If link is not up, then drop the packet. - * o check channel status. If it's down then initiate a call. - * o pass a packet to corresponding WAN device. - * o free socket buffer - * - * Return: 0 complete (socket buffer must be freed) - * non-0 packet may be re-transmitted (tbusy must be set) - * - * Notes: - * 1. This routine is called either by the protocol stack or by the "net - * bottom half" (with interrupts enabled). - * 2. Setting tbusy flag will inhibit further transmit requests from the - * protocol stack and can be used for flow control with protocol layer. */ -static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - struct cycx_device *card = chan->card; - - if (!chan->svc) - chan->protocol = ntohs(skb->protocol); - - if (card->wandev.state != WAN_CONNECTED) - ++chan->ifstats.tx_dropped; - else if (chan->svc && chan->protocol && - chan->protocol != ntohs(skb->protocol)) { - pr_info("%s: unsupported Ethertype 0x%04X on interface %s!\n", - card->devname, ntohs(skb->protocol), dev->name); - ++chan->ifstats.tx_errors; - } else if (chan->protocol == ETH_P_IP) { - switch (chan->state) { - case WAN_DISCONNECTED: - if (cycx_x25_chan_connect(dev)) { - netif_stop_queue(dev); - return NETDEV_TX_BUSY; - } - /* fall thru */ - case WAN_CONNECTED: - reset_timer(dev); - dev->trans_start = jiffies; - netif_stop_queue(dev); - - if (cycx_x25_chan_send(dev, skb)) - return NETDEV_TX_BUSY; - - break; - default: - ++chan->ifstats.tx_dropped; - ++card->wandev.stats.tx_dropped; - } - } else { /* chan->protocol == ETH_P_X25 */ - switch (skb->data[0]) { - case X25_IFACE_DATA: - break; - case X25_IFACE_CONNECT: - cycx_x25_chan_connect(dev); - goto free_packet; - case X25_IFACE_DISCONNECT: - cycx_x25_chan_disconnect(dev); - goto free_packet; - default: - pr_info("%s: unknown %d x25-iface request on %s!\n", - card->devname, skb->data[0], dev->name); - ++chan->ifstats.tx_errors; - goto free_packet; - } - - skb_pull(skb, 1); /* Remove control byte */ - reset_timer(dev); - dev->trans_start = jiffies; - netif_stop_queue(dev); - - if (cycx_x25_chan_send(dev, skb)) { - /* prepare for future retransmissions */ - skb_push(skb, 1); - return NETDEV_TX_BUSY; - } - } - -free_packet: - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - -/* Get Ethernet-style interface statistics. - * Return a pointer to struct net_device_stats */ -static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - - return chan ? &chan->ifstats : NULL; -} - -/* Interrupt Handlers */ -/* X.25 Interrupt Service Routine. 
*/ -static void cycx_x25_irq_handler(struct cycx_device *card) -{ - struct cycx_x25_cmd cmd; - u16 z = 0; - - card->in_isr = 1; - card->buff_int_mode_unbusy = 0; - cycx_peek(&card->hw, X25_RXMBOX_OFFS, &cmd, sizeof(cmd)); - - switch (cmd.command) { - case X25_DATA_INDICATION: - cycx_x25_irq_rx(card, &cmd); - break; - case X25_ACK_FROM_VC: - cycx_x25_irq_tx(card, &cmd); - break; - case X25_LOG: - cycx_x25_irq_log(card, &cmd); - break; - case X25_STATISTIC: - cycx_x25_irq_stat(card, &cmd); - break; - case X25_CONNECT_CONFIRM: - cycx_x25_irq_connect_confirm(card, &cmd); - break; - case X25_CONNECT_INDICATION: - cycx_x25_irq_connect(card, &cmd); - break; - case X25_DISCONNECT_INDICATION: - cycx_x25_irq_disconnect(card, &cmd); - break; - case X25_DISCONNECT_CONFIRM: - cycx_x25_irq_disconnect_confirm(card, &cmd); - break; - case X25_LINE_ON: - cycx_set_state(card, WAN_CONNECTED); - break; - case X25_LINE_OFF: - cycx_set_state(card, WAN_DISCONNECTED); - break; - default: - cycx_x25_irq_spurious(card, &cmd); - break; - } - - cycx_poke(&card->hw, 0, &z, sizeof(z)); - cycx_poke(&card->hw, X25_RXMBOX_OFFS, &z, sizeof(z)); - card->in_isr = 0; -} - -/* Transmit interrupt handler. - * o Release socket buffer - * o Clear 'tbusy' flag */ -static void cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd) -{ - struct net_device *dev; - struct wan_device *wandev = &card->wandev; - u8 lcn; - - cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); - - /* unbusy device and then dev_tint(); */ - dev = cycx_x25_get_dev_by_lcn(wandev, lcn); - if (dev) { - card->buff_int_mode_unbusy = 1; - netif_wake_queue(dev); - } else - pr_err("%s:ackvc for inexistent lcn %d\n", card->devname, lcn); -} - -/* Receive interrupt handler. - * This routine handles fragmented IP packets using M-bit according to the - * RFC1356. - * o map logical channel number to network interface. - * o allocate socket buffer or append received packet to the existing one. - * o if M-bit is reset (i.e. it's the last packet in a sequence) then - * decapsulate packet and pass socket buffer to the protocol stack. - * - * Notes: - * 1. When allocating a socket buffer, if M-bit is set then more data is - * coming and we have to allocate buffer for the maximum IP packet size - * expected on this channel. - * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no - * socket buffers available) the whole packet sequence must be discarded. */ -static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd) -{ - struct wan_device *wandev = &card->wandev; - struct net_device *dev; - struct cycx_x25_channel *chan; - struct sk_buff *skb; - u8 bitm, lcn; - int pktlen = cmd->len - 5; - - cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); - cycx_peek(&card->hw, cmd->buf + 4, &bitm, sizeof(bitm)); - bitm &= 0x10; - - dev = cycx_x25_get_dev_by_lcn(wandev, lcn); - if (!dev) { - /* Invalid channel, discard packet */ - pr_info("%s: receiving on orphaned LCN %d!\n", - card->devname, lcn); - return; - } - - chan = netdev_priv(dev); - reset_timer(dev); - - if (chan->drop_sequence) { - if (!bitm) - chan->drop_sequence = 0; - else - return; - } - - if ((skb = chan->rx_skb) == NULL) { - /* Allocate new socket buffer */ - int bufsize = bitm ? dev->mtu : pktlen; - - if ((skb = dev_alloc_skb((chan->protocol == ETH_P_X25 ? 
1 : 0) + - bufsize + - dev->hard_header_len)) == NULL) { - pr_info("%s: no socket buffers available!\n", - card->devname); - chan->drop_sequence = 1; - ++chan->ifstats.rx_dropped; - return; - } - - if (chan->protocol == ETH_P_X25) /* X.25 socket layer control */ - /* 0 = data packet (dev_alloc_skb zeroed skb->data) */ - skb_put(skb, 1); - - skb->dev = dev; - skb->protocol = htons(chan->protocol); - chan->rx_skb = skb; - } - - if (skb_tailroom(skb) < pktlen) { - /* No room for the packet. Call off the whole thing! */ - dev_kfree_skb_irq(skb); - chan->rx_skb = NULL; - - if (bitm) - chan->drop_sequence = 1; - - pr_info("%s: unexpectedly long packet sequence on interface %s!\n", - card->devname, dev->name); - ++chan->ifstats.rx_length_errors; - return; - } - - /* Append packet to the socket buffer */ - cycx_peek(&card->hw, cmd->buf + 5, skb_put(skb, pktlen), pktlen); - - if (bitm) - return; /* more data is coming */ - - chan->rx_skb = NULL; /* dequeue packet */ - - ++chan->ifstats.rx_packets; - chan->ifstats.rx_bytes += pktlen; - - skb_reset_mac_header(skb); - netif_rx(skb); -} - -/* Connect interrupt handler. */ -static void cycx_x25_irq_connect(struct cycx_device *card, - struct cycx_x25_cmd *cmd) -{ - struct wan_device *wandev = &card->wandev; - struct net_device *dev = NULL; - struct cycx_x25_channel *chan; - u8 d[32], - loc[24], - rem[24]; - u8 lcn, sizeloc, sizerem; - - cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); - cycx_peek(&card->hw, cmd->buf + 5, &sizeloc, sizeof(sizeloc)); - cycx_peek(&card->hw, cmd->buf + 6, d, cmd->len - 6); - - sizerem = sizeloc >> 4; - sizeloc &= 0x0F; - - loc[0] = rem[0] = '\0'; - - if (sizeloc) - nibble_to_byte(d, loc, sizeloc, 0); - - if (sizerem) - nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1); - - dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n", - __func__, lcn, loc, rem); - - dev = cycx_x25_get_dev_by_dte_addr(wandev, rem); - if (!dev) { - /* Invalid channel, discard packet */ - pr_info("%s: connect not expected: remote %s!\n", - card->devname, rem); - return; - } - - chan = netdev_priv(dev); - chan->lcn = lcn; - cycx_x25_connect_response(card, chan); - cycx_x25_set_chan_state(dev, WAN_CONNECTED); -} - -/* Connect confirm interrupt handler. */ -static void cycx_x25_irq_connect_confirm(struct cycx_device *card, - struct cycx_x25_cmd *cmd) -{ - struct wan_device *wandev = &card->wandev; - struct net_device *dev; - struct cycx_x25_channel *chan; - u8 lcn, key; - - cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); - cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key)); - dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n", - card->devname, __func__, lcn, key); - - dev = cycx_x25_get_dev_by_lcn(wandev, -key); - if (!dev) { - /* Invalid channel, discard packet */ - clear_bit(--key, (void*)&card->u.x.connection_keys); - pr_info("%s: connect confirm not expected: lcn %d, key=%d!\n", - card->devname, lcn, key); - return; - } - - clear_bit(--key, (void*)&card->u.x.connection_keys); - chan = netdev_priv(dev); - chan->lcn = lcn; - cycx_x25_set_chan_state(dev, WAN_CONNECTED); -} - -/* Disconnect confirm interrupt handler. 
*/ -static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card, - struct cycx_x25_cmd *cmd) -{ - struct wan_device *wandev = &card->wandev; - struct net_device *dev; - u8 lcn; - - cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); - dprintk(1, KERN_INFO "%s: %s:lcn=%d\n", - card->devname, __func__, lcn); - dev = cycx_x25_get_dev_by_lcn(wandev, lcn); - if (!dev) { - /* Invalid channel, discard packet */ - pr_info("%s:disconnect confirm not expected!:lcn %d\n", - card->devname, lcn); - return; - } - - cycx_x25_set_chan_state(dev, WAN_DISCONNECTED); -} - -/* disconnect interrupt handler. */ -static void cycx_x25_irq_disconnect(struct cycx_device *card, - struct cycx_x25_cmd *cmd) -{ - struct wan_device *wandev = &card->wandev; - struct net_device *dev; - u8 lcn; - - cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); - dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn); - - dev = cycx_x25_get_dev_by_lcn(wandev, lcn); - if (dev) { - struct cycx_x25_channel *chan = netdev_priv(dev); - - cycx_x25_disconnect_response(card, chan->link, lcn); - cycx_x25_set_chan_state(dev, WAN_DISCONNECTED); - } else - cycx_x25_disconnect_response(card, 0, lcn); -} - -/* LOG interrupt handler. */ -static void cycx_x25_irq_log(struct cycx_device *card, struct cycx_x25_cmd *cmd) -{ -#if CYCLOMX_X25_DEBUG - char bf[20]; - u16 size, toread, link, msg_code; - u8 code, routine; - - cycx_peek(&card->hw, cmd->buf, &msg_code, sizeof(msg_code)); - cycx_peek(&card->hw, cmd->buf + 2, &link, sizeof(link)); - cycx_peek(&card->hw, cmd->buf + 4, &size, sizeof(size)); - /* at most 20 bytes are available... thanks to Daniela :) */ - toread = size < 20 ? size : 20; - cycx_peek(&card->hw, cmd->buf + 10, &bf, toread); - cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1); - cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1); - - pr_info("cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n"); - pr_info("cmd->buf=0x%X\n", cmd->buf); - pr_info("Log message code=0x%X\n", msg_code); - pr_info("Link=%d\n", link); - pr_info("log code=0x%X\n", code); - pr_info("log routine=0x%X\n", routine); - pr_info("Message size=%d\n", size); - hex_dump("Message", bf, toread); -#endif -} - -/* STATISTIC interrupt handler. */ -static void cycx_x25_irq_stat(struct cycx_device *card, - struct cycx_x25_cmd *cmd) -{ - cycx_peek(&card->hw, cmd->buf, &card->u.x.stats, - sizeof(card->u.x.stats)); - hex_dump("cycx_x25_irq_stat", (unsigned char*)&card->u.x.stats, - sizeof(card->u.x.stats)); - cycx_x25_dump_stats(&card->u.x.stats); - wake_up_interruptible(&card->wait_stats); -} - -/* Spurious interrupt handler. - * o print a warning - * If number of spurious interrupts exceeded some limit, then ??? */ -static void cycx_x25_irq_spurious(struct cycx_device *card, - struct cycx_x25_cmd *cmd) -{ - pr_info("%s: spurious interrupt (0x%X)!\n", - card->devname, cmd->command); -} -#ifdef CYCLOMX_X25_DEBUG -static void hex_dump(char *msg, unsigned char *p, int len) -{ - print_hex_dump(KERN_INFO, msg, DUMP_PREFIX_OFFSET, 16, 1, - p, len, true); -} -#endif - -/* Cyclom 2X Firmware-Specific Functions */ -/* Exec X.25 command. 
*/ -static int x25_exec(struct cycx_device *card, int command, int link, - void *d1, int len1, void *d2, int len2) -{ - struct cycx_x25_cmd c; - unsigned long flags; - u32 addr = 0x1200 + 0x2E0 * link + 0x1E2; - u8 retry = CYCX_X25_MAX_CMD_RETRY; - int err = 0; - - c.command = command; - c.link = link; - c.len = len1 + len2; - - spin_lock_irqsave(&card->u.x.lock, flags); - - /* write command */ - cycx_poke(&card->hw, X25_MBOX_OFFS, &c, sizeof(c) - sizeof(c.buf)); - - /* write X.25 data */ - if (d1) { - cycx_poke(&card->hw, addr, d1, len1); - - if (d2) { - if (len2 > 254) { - u32 addr1 = 0xA00 + 0x400 * link; - - cycx_poke(&card->hw, addr + len1, d2, 249); - cycx_poke(&card->hw, addr1, ((u8*)d2) + 249, - len2 - 249); - } else - cycx_poke(&card->hw, addr + len1, d2, len2); - } - } - - /* generate interruption, executing command */ - cycx_intr(&card->hw); - - /* wait till card->mbox == 0 */ - do { - err = cycx_exec(card->mbox); - } while (retry-- && err); - - spin_unlock_irqrestore(&card->u.x.lock, flags); - - return err; -} - -/* Configure adapter. */ -static int cycx_x25_configure(struct cycx_device *card, - struct cycx_x25_config *conf) -{ - struct { - u16 nlinks; - struct cycx_x25_config conf[2]; - } x25_cmd_conf; - - memset(&x25_cmd_conf, 0, sizeof(x25_cmd_conf)); - x25_cmd_conf.nlinks = 2; - x25_cmd_conf.conf[0] = *conf; - /* FIXME: we need to find a way in the wanrouter framework - to configure the second link, for now lets use it - with the same config from the first link, fixing - the interface type to RS232, the speed in 38400 and - the clock to external */ - x25_cmd_conf.conf[1] = *conf; - x25_cmd_conf.conf[1].link = 1; - x25_cmd_conf.conf[1].speed = 5; /* 38400 */ - x25_cmd_conf.conf[1].clock = 8; - x25_cmd_conf.conf[1].flags = 0; /* default = RS232 */ - - cycx_x25_dump_config(&x25_cmd_conf.conf[0]); - cycx_x25_dump_config(&x25_cmd_conf.conf[1]); - - return x25_exec(card, X25_CONFIG, 0, - &x25_cmd_conf, sizeof(x25_cmd_conf), NULL, 0); -} - -/* Get protocol statistics. */ -static int cycx_x25_get_stats(struct cycx_device *card) -{ - /* the firmware expects 20 in the size field!!! 
- thanks to Daniela */ - int err = x25_exec(card, X25_STATISTIC, 0, NULL, 20, NULL, 0); - - if (err) - return err; - - interruptible_sleep_on(&card->wait_stats); - - if (signal_pending(current)) - return -EINTR; - - card->wandev.stats.rx_packets = card->u.x.stats.n2_rx_frames; - card->wandev.stats.rx_over_errors = card->u.x.stats.rx_over_errors; - card->wandev.stats.rx_crc_errors = card->u.x.stats.rx_crc_errors; - card->wandev.stats.rx_length_errors = 0; /* not available from fw */ - card->wandev.stats.rx_frame_errors = 0; /* not available from fw */ - card->wandev.stats.rx_missed_errors = card->u.x.stats.rx_aborts; - card->wandev.stats.rx_dropped = 0; /* not available from fw */ - card->wandev.stats.rx_errors = 0; /* not available from fw */ - card->wandev.stats.tx_packets = card->u.x.stats.n2_tx_frames; - card->wandev.stats.tx_aborted_errors = card->u.x.stats.tx_aborts; - card->wandev.stats.tx_dropped = 0; /* not available from fw */ - card->wandev.stats.collisions = 0; /* not available from fw */ - card->wandev.stats.tx_errors = 0; /* not available from fw */ - - cycx_x25_dump_devs(&card->wandev); - - return 0; -} - -/* return the number of nibbles */ -static int byte_to_nibble(u8 *s, u8 *d, char *nibble) -{ - int i = 0; - - if (*nibble && *s) { - d[i] |= *s++ - '0'; - *nibble = 0; - ++i; - } - - while (*s) { - d[i] = (*s - '0') << 4; - if (*(s + 1)) - d[i] |= *(s + 1) - '0'; - else { - *nibble = 1; - break; - } - ++i; - s += 2; - } - - return i; -} - -static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble) -{ - if (nibble) { - *d++ = '0' + (*s++ & 0x0F); - --len; - } - - while (len) { - *d++ = '0' + (*s >> 4); - - if (--len) { - *d++ = '0' + (*s & 0x0F); - --len; - } else break; - - ++s; - } - - *d = '\0'; -} - -/* Place X.25 call. */ -static int x25_place_call(struct cycx_device *card, - struct cycx_x25_channel *chan) -{ - int err = 0, - len; - char d[64], - nibble = 0, - mylen = chan->local_addr ? strlen(chan->local_addr) : 0, - remotelen = strlen(chan->addr); - u8 key; - - if (card->u.x.connection_keys == ~0U) { - pr_info("%s: too many simultaneous connection requests!\n", - card->devname); - return -EAGAIN; - } - - key = ffz(card->u.x.connection_keys); - set_bit(key, (void*)&card->u.x.connection_keys); - ++key; - dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key); - memset(d, 0, sizeof(d)); - d[1] = key; /* user key */ - d[2] = 0x10; - d[4] = 0x0B; - - len = byte_to_nibble(chan->addr, d + 6, &nibble); - - if (chan->local_addr) - len += byte_to_nibble(chan->local_addr, d + 6 + len, &nibble); - - if (nibble) - ++len; - - d[5] = mylen << 4 | remotelen; - d[6 + len + 1] = 0xCC; /* TCP/IP over X.25, thanks to Daniela :) */ - - if ((err = x25_exec(card, X25_CONNECT_REQUEST, chan->link, - &d, 7 + len + 1, NULL, 0)) != 0) - clear_bit(--key, (void*)&card->u.x.connection_keys); - else - chan->lcn = -key; - - return err; -} - -/* Place X.25 CONNECT RESPONSE. */ -static int cycx_x25_connect_response(struct cycx_device *card, - struct cycx_x25_channel *chan) -{ - u8 d[8]; - - memset(d, 0, sizeof(d)); - d[0] = d[3] = chan->lcn; - d[2] = 0x10; - d[4] = 0x0F; - d[7] = 0xCC; /* TCP/IP over X.25, thanks Daniela */ - - return x25_exec(card, X25_CONNECT_RESPONSE, chan->link, &d, 8, NULL, 0); -} - -/* Place X.25 DISCONNECT RESPONSE. 
*/ -static int cycx_x25_disconnect_response(struct cycx_device *card, u8 link, - u8 lcn) -{ - char d[5]; - - memset(d, 0, sizeof(d)); - d[0] = d[3] = lcn; - d[2] = 0x10; - d[4] = 0x17; - - return x25_exec(card, X25_DISCONNECT_RESPONSE, link, &d, 5, NULL, 0); -} - -/* Clear X.25 call. */ -static int x25_clear_call(struct cycx_device *card, u8 link, u8 lcn, u8 cause, - u8 diagn) -{ - u8 d[7]; - - memset(d, 0, sizeof(d)); - d[0] = d[3] = lcn; - d[2] = 0x10; - d[4] = 0x13; - d[5] = cause; - d[6] = diagn; - - return x25_exec(card, X25_DISCONNECT_REQUEST, link, d, 7, NULL, 0); -} - -/* Send X.25 data packet. */ -static int cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm, - int len, void *buf) -{ - u8 d[] = "?\xFF\x10??"; - - d[0] = d[3] = lcn; - d[4] = bitm; - - return x25_exec(card, X25_DATA_REQUEST, link, &d, 5, buf, len); -} - -/* Miscellaneous */ -/* Find network device by its channel number. */ -static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev, - s16 lcn) -{ - struct net_device *dev = wandev->dev; - struct cycx_x25_channel *chan; - - while (dev) { - chan = netdev_priv(dev); - - if (chan->lcn == lcn) - break; - dev = chan->slave; - } - return dev; -} - -/* Find network device by its remote dte address. */ -static struct net_device * - cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte) -{ - struct net_device *dev = wandev->dev; - struct cycx_x25_channel *chan; - - while (dev) { - chan = netdev_priv(dev); - - if (!strcmp(chan->addr, dte)) - break; - dev = chan->slave; - } - return dev; -} - -/* Initiate connection on the logical channel. - * o for PVC we just get channel configuration - * o for SVCs place an X.25 call - * - * Return: 0 connected - * >0 connection in progress - * <0 failure */ -static int cycx_x25_chan_connect(struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - struct cycx_device *card = chan->card; - - if (chan->svc) { - if (!chan->addr[0]) - return -EINVAL; /* no destination address */ - - dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n", - card->devname, chan->addr); - - if (x25_place_call(card, chan)) - return -EIO; - - cycx_x25_set_chan_state(dev, WAN_CONNECTING); - return 1; - } else - cycx_x25_set_chan_state(dev, WAN_CONNECTED); - - return 0; -} - -/* Disconnect logical channel. - * o if SVC then clear X.25 call */ -static void cycx_x25_chan_disconnect(struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - - if (chan->svc) { - x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0); - cycx_x25_set_chan_state(dev, WAN_DISCONNECTING); - } else - cycx_x25_set_chan_state(dev, WAN_DISCONNECTED); -} - -/* Called by kernel timer */ -static void cycx_x25_chan_timer(unsigned long d) -{ - struct net_device *dev = (struct net_device *)d; - struct cycx_x25_channel *chan = netdev_priv(dev); - - if (chan->state == WAN_CONNECTED) - cycx_x25_chan_disconnect(dev); - else - pr_err("%s: %s for svc (%s) not connected!\n", - chan->card->devname, __func__, dev->name); -} - -/* Set logical channel state. 
*/ -static void cycx_x25_set_chan_state(struct net_device *dev, u8 state) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - struct cycx_device *card = chan->card; - unsigned long flags; - char *string_state = NULL; - - spin_lock_irqsave(&card->lock, flags); - - if (chan->state != state) { - if (chan->svc && chan->state == WAN_CONNECTED) - del_timer(&chan->timer); - - switch (state) { - case WAN_CONNECTED: - string_state = "connected!"; - *(__be16*)dev->dev_addr = htons(chan->lcn); - netif_wake_queue(dev); - reset_timer(dev); - - if (chan->protocol == ETH_P_X25) - cycx_x25_chan_send_event(dev, - X25_IFACE_CONNECT); - - break; - case WAN_CONNECTING: - string_state = "connecting..."; - break; - case WAN_DISCONNECTING: - string_state = "disconnecting..."; - break; - case WAN_DISCONNECTED: - string_state = "disconnected!"; - - if (chan->svc) { - *(unsigned short*)dev->dev_addr = 0; - chan->lcn = 0; - } - - if (chan->protocol == ETH_P_X25) - cycx_x25_chan_send_event(dev, - X25_IFACE_DISCONNECT); - - netif_wake_queue(dev); - break; - } - - pr_info("%s: interface %s %s\n", - card->devname, dev->name, string_state); - chan->state = state; - } - - spin_unlock_irqrestore(&card->lock, flags); -} - -/* Send packet on a logical channel. - * When this function is called, tx_skb field of the channel data space - * points to the transmit socket buffer. When transmission is complete, - * release socket buffer and reset 'tbusy' flag. - * - * Return: 0 - transmission complete - * 1 - busy - * - * Notes: - * 1. If packet length is greater than MTU for this channel, we'll fragment - * the packet into 'complete sequence' using M-bit. - * 2. When transmission is complete, an event notification should be issued - * to the router. */ -static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - struct cycx_device *card = chan->card; - int bitm = 0; /* final packet */ - unsigned len = skb->len; - - if (skb->len > card->wandev.mtu) { - len = card->wandev.mtu; - bitm = 0x10; /* set M-bit (more data) */ - } - - if (cycx_x25_send(card, chan->link, chan->lcn, bitm, len, skb->data)) - return 1; - - if (bitm) { - skb_pull(skb, len); - return 1; - } - - ++chan->ifstats.tx_packets; - chan->ifstats.tx_bytes += len; - - return 0; -} - -/* Send event (connection, disconnection, etc) to X.25 socket layer */ - -static void cycx_x25_chan_send_event(struct net_device *dev, u8 event) -{ - struct sk_buff *skb; - unsigned char *ptr; - - if ((skb = dev_alloc_skb(1)) == NULL) { - pr_err("%s: out of memory\n", __func__); - return; - } - - ptr = skb_put(skb, 1); - *ptr = event; - - skb->protocol = x25_type_trans(skb, dev); - netif_rx(skb); -} - -/* Convert line speed in bps to a number used by cyclom 2x code. */ -static u8 bps_to_speed_code(u32 bps) -{ - u8 number = 0; /* defaults to the lowest (1200) speed ;> */ - - if (bps >= 512000) number = 8; - else if (bps >= 256000) number = 7; - else if (bps >= 64000) number = 6; - else if (bps >= 38400) number = 5; - else if (bps >= 19200) number = 4; - else if (bps >= 9600) number = 3; - else if (bps >= 4800) number = 2; - else if (bps >= 2400) number = 1; - - return number; -} - -/* log base 2 */ -static u8 cycx_log2(u32 n) -{ - u8 log = 0; - - if (!n) - return 0; - - while (n > 1) { - n >>= 1; - ++log; - } - - return log; -} - -/* Convert decimal string to unsigned integer. - * If len != 0 then only 'len' characters of the string are converted. 
*/ -static unsigned dec_to_uint(u8 *str, int len) -{ - unsigned val = 0; - - if (!len) - len = strlen(str); - - for (; len && isdigit(*str); ++str, --len) - val = (val * 10) + (*str - (unsigned) '0'); - - return val; -} - -static void reset_timer(struct net_device *dev) -{ - struct cycx_x25_channel *chan = netdev_priv(dev); - - if (chan->svc) - mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ); -} -#ifdef CYCLOMX_X25_DEBUG -static void cycx_x25_dump_config(struct cycx_x25_config *conf) -{ - pr_info("X.25 configuration\n"); - pr_info("-----------------\n"); - pr_info("link number=%d\n", conf->link); - pr_info("line speed=%d\n", conf->speed); - pr_info("clock=%sternal\n", conf->clock == 8 ? "Ex" : "In"); - pr_info("# level 2 retransm.=%d\n", conf->n2); - pr_info("level 2 window=%d\n", conf->n2win); - pr_info("level 3 window=%d\n", conf->n3win); - pr_info("# logical channels=%d\n", conf->nvc); - pr_info("level 3 pkt len=%d\n", conf->pktlen); - pr_info("my address=%d\n", conf->locaddr); - pr_info("remote address=%d\n", conf->remaddr); - pr_info("t1=%d seconds\n", conf->t1); - pr_info("t2=%d seconds\n", conf->t2); - pr_info("t21=%d seconds\n", conf->t21); - pr_info("# PVCs=%d\n", conf->npvc); - pr_info("t23=%d seconds\n", conf->t23); - pr_info("flags=0x%x\n", conf->flags); -} - -static void cycx_x25_dump_stats(struct cycx_x25_stats *stats) -{ - pr_info("X.25 statistics\n"); - pr_info("--------------\n"); - pr_info("rx_crc_errors=%d\n", stats->rx_crc_errors); - pr_info("rx_over_errors=%d\n", stats->rx_over_errors); - pr_info("n2_tx_frames=%d\n", stats->n2_tx_frames); - pr_info("n2_rx_frames=%d\n", stats->n2_rx_frames); - pr_info("tx_timeouts=%d\n", stats->tx_timeouts); - pr_info("rx_timeouts=%d\n", stats->rx_timeouts); - pr_info("n3_tx_packets=%d\n", stats->n3_tx_packets); - pr_info("n3_rx_packets=%d\n", stats->n3_rx_packets); - pr_info("tx_aborts=%d\n", stats->tx_aborts); - pr_info("rx_aborts=%d\n", stats->rx_aborts); -} - -static void cycx_x25_dump_devs(struct wan_device *wandev) -{ - struct net_device *dev = wandev->dev; - - pr_info("X.25 dev states\n"); - pr_info("name: addr: txoff: protocol:\n"); - pr_info("---------------------------------------\n"); - - while(dev) { - struct cycx_x25_channel *chan = netdev_priv(dev); - - pr_info("%-5.5s %-15.15s %d ETH_P_%s\n", - chan->name, chan->addr, netif_queue_stopped(dev), - chan->protocol == ETH_P_IP ? 
"IP" : "X25"); - dev = chan->slave; - } -} - -#endif /* CYCLOMX_X25_DEBUG */ -/* End */ diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 56941d6547eb..3f0c4f268751 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2448,11 +2448,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Allocate driver private data */ - card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL); - if (card == NULL) { - pr_err("FarSync card found but insufficient memory for driver storage\n"); + card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL); + if (card == NULL) return -ENOMEM; - } /* Try to enable the device */ if ((err = pci_enable_device(pdev)) != 0) { diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 10cc7df95498..a0a932c63d0a 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -280,14 +280,13 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, if (!try_module_get(proto->module)) return -ENOSYS; - if (size) - if ((dev_to_hdlc(dev)->state = kmalloc(size, - GFP_KERNEL)) == NULL) { - netdev_warn(dev, - "Memory squeeze on hdlc_proto_attach()\n"); + if (size) { + dev_to_hdlc(dev)->state = kmalloc(size, GFP_KERNEL); + if (dev_to_hdlc(dev)->state == NULL) { module_put(proto->module); return -ENOBUFS; } + } dev_to_hdlc(dev)->proto = proto; return 0; } diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 44db8b75a531..5895f1978691 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -128,7 +128,6 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu) rbuff = kmalloc(len + 4, GFP_ATOMIC); if (xbuff == NULL || rbuff == NULL) { - netdev_warn(dev, "unable to grow X.25 buffers, MTU change cancelled\n"); kfree(xbuff); kfree(rbuff); return -ENOMEM; diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index def12b38cbf7..c9c711dcd0e6 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -1055,7 +1055,6 @@ int i2400m_read_mac_addr(struct i2400m *i2400m) result = 0; } net_dev->addr_len = ETH_ALEN; - memcpy(net_dev->perm_addr, ack_buf.ack_pl, ETH_ALEN); memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN); error_read_mac: d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result); diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index 530581ca0191..48896138418f 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -599,12 +599,12 @@ static void i2400m_get_drvinfo(struct net_device *net_dev, { struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1); - strncpy(info->fw_version, - i2400m->fw_name ? : "", sizeof(info->fw_version) - 1); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->fw_version, i2400m->fw_name ? 
: "", + sizeof(info->fw_version)); if (net_dev->dev.parent) - strncpy(info->bus_info, dev_name(net_dev->dev.parent), - sizeof(info->bus_info) - 1); + strlcpy(info->bus_info, dev_name(net_dev->dev.parent), + sizeof(info->bus_info)); } static const struct ethtool_ops i2400m_ethtool_ops = { diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 37becfcc98f2..0b602951ff6b 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1346,29 +1346,22 @@ EXPORT_SYMBOL(i2400m_unknown_barker); int i2400m_rx_setup(struct i2400m *i2400m) { int result = 0; - struct device *dev = i2400m_dev(i2400m); i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1; if (i2400m->rx_reorder) { unsigned itr; - size_t size; struct i2400m_roq_log *rd; result = -ENOMEM; - size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1); - i2400m->rx_roq = kzalloc(size, GFP_KERNEL); - if (i2400m->rx_roq == NULL) { - dev_err(dev, "RX: cannot allocate %zu bytes for " - "reorder queues\n", size); + i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1, + sizeof(i2400m->rx_roq[0]), GFP_KERNEL); + if (i2400m->rx_roq == NULL) goto error_roq_alloc; - } - size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1); - rd = kzalloc(size, GFP_KERNEL); + rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log), + GFP_KERNEL); if (rd == NULL) { - dev_err(dev, "RX: cannot allocate %zu bytes for " - "reorder queues log areas\n", size); result = -ENOMEM; goto error_roq_log_alloc; } diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c index d44b545f4082..fc1355d98bc6 100644 --- a/drivers/net/wimax/i2400m/usb-notif.c +++ b/drivers/net/wimax/i2400m/usb-notif.c @@ -199,7 +199,6 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu) d_fnstart(4, dev, "(i2400m %p)\n", i2400mu); buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA); if (buf == NULL) { - dev_err(dev, "notification: buffer allocation failed\n"); ret = -ENOMEM; goto error_buf_alloc; } diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 080f36303a4f..cd15a93d9084 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -346,9 +346,9 @@ static void i2400mu_get_drvinfo(struct net_device *net_dev, struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); struct usb_device *udev = i2400mu->usb_dev; - strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1); - strncpy(info->fw_version, - i2400m->fw_name ? : "", sizeof(info->fw_version) - 1); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->fw_version, i2400m->fw_name ? 
: "", + sizeof(info->fw_version)); usb_make_path(udev, info->bus_info, sizeof(info->bus_info)); } diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c index 630577dd3a7a..956024a636e6 100644 --- a/drivers/net/wireless/airo_cs.c +++ b/drivers/net/wireless/airo_cs.c @@ -69,10 +69,9 @@ static int airo_probe(struct pcmcia_device *p_dev) /* Allocate space for private device-specific data */ local = kzalloc(sizeof(local_info_t), GFP_KERNEL); - if (!local) { - printk(KERN_ERR "airo_cs: no memory for new device\n"); + if (!local) return -ENOMEM; - } + p_dev->priv = local; return airo_config(p_dev); diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 77fa4286e5e9..5ac5f7ae2721 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -2164,10 +2164,8 @@ static int at76_alloc_urbs(struct at76_priv *priv, buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE; priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); - if (!priv->bulk_out_buffer) { - dev_err(&interface->dev, "cannot allocate output buffer\n"); + if (!priv->bulk_out_buffer) return -ENOMEM; - } at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__); diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 30ca0a60a64c..1d264c0f5a9b 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -240,13 +240,14 @@ static const struct ath_ops ath5k_common_ops = { * Driver Initialization * \***********************/ -static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +static void ath5k_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath5k_hw *ah = hw->priv; struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); - return ath_reg_notifier_apply(wiphy, request, regulatory); + ath_reg_notifier_apply(wiphy, request, regulatory); } /********************\ diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index ab363f34b4df..a78afa98c650 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -1613,6 +1613,10 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah) ah->ah_cal_mask |= AR5K_CALIBRATION_NF; ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel); + if (WARN_ON(ee_mode < 0)) { + ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF; + return; + } /* completed NF calibration, test threshold */ nf = ath5k_hw_read_measured_noise_floor(ah); diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 4084b1076286..e2d8b2cf19eb 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -985,6 +985,8 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, return; ee_mode = ath5k_eeprom_mode_from_channel(channel); + if (WARN_ON(ee_mode < 0)) + return; /* Adjust power delta for channel 14 */ if (channel->center_freq == 2484) diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 5516a8ccc3c6..752ffc4f4166 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -427,6 +427,30 @@ static bool ath6kl_is_tx_pending(struct ath6kl *ar) return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0; } +static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, + bool enable) +{ + int err; + + 
if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag))) + return; + + if (vif->nw_type != INFRA_NETWORK) + return; + + if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, + vif->ar->fw_capabilities)) + return; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n", + enable ? "enable" : "disable"); + + err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi, + vif->fw_vif_idx, enable); + if (err) + ath6kl_err("failed to %s enhanced bmiss detection: %d\n", + enable ? "enable" : "disable", err); +} static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) @@ -616,13 +640,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, vif->req_bssid, vif->ch_hint, ar->connect_ctrl_flags, nw_subtype); - /* disable background scan if period is 0 */ - if (sme->bg_scan_period == 0) + if (sme->bg_scan_period == 0) { + /* disable background scan if period is 0 */ sme->bg_scan_period = 0xffff; - - /* configure default value if not specified */ - if (sme->bg_scan_period == -1) + } else if (sme->bg_scan_period == -1) { + /* configure default value if not specified */ sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD; + } ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0, sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0); @@ -767,7 +791,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n", nw_type & ADHOC_CREATOR ? "creator" : "joiner"); cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL); - cfg80211_put_bss(bss); + cfg80211_put_bss(ar->wiphy, bss); return; } @@ -778,7 +802,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, assoc_req_ie, assoc_req_len, assoc_resp_ie, assoc_resp_len, WLAN_STATUS_SUCCESS, GFP_KERNEL); - cfg80211_put_bss(bss); + cfg80211_put_bss(ar->wiphy, bss); } else if (vif->sme_state == SME_CONNECTED) { /* inform roam event to cfg80211 */ cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len, @@ -1454,10 +1478,10 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, return -EIO; if (pmgmt) { - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__); mode.pwr_mode = REC_POWER; } else { - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__); mode.pwr_mode = MAX_PERF_POWER; } @@ -1509,7 +1533,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy, list_del(&vif->list); spin_unlock_bh(&ar->list_lock); - ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag)); + ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); ath6kl_cfg80211_vif_cleanup(vif); @@ -1559,17 +1583,13 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, set_iface_type: switch (type) { case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: vif->next_mode = INFRA_NETWORK; break; case NL80211_IFTYPE_ADHOC: vif->next_mode = ADHOC_NETWORK; break; case NL80211_IFTYPE_AP: - vif->next_mode = AP_NETWORK; - break; - case NL80211_IFTYPE_P2P_CLIENT: - vif->next_mode = INFRA_NETWORK; - break; case NL80211_IFTYPE_P2P_GO: vif->next_mode = AP_NETWORK; break; @@ -1778,14 +1798,14 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, if (vif->target_stats.rx_byte) { sinfo->rx_bytes = vif->target_stats.rx_byte; - sinfo->filled |= STATION_INFO_RX_BYTES; + sinfo->filled |= STATION_INFO_RX_BYTES64; sinfo->rx_packets = vif->target_stats.rx_pkt; sinfo->filled 
|= STATION_INFO_RX_PACKETS; } if (vif->target_stats.tx_byte) { sinfo->tx_bytes = vif->target_stats.tx_byte; - sinfo->filled |= STATION_INFO_TX_BYTES; + sinfo->filled |= STATION_INFO_TX_BYTES64; sinfo->tx_packets = vif->target_stats.tx_pkt; sinfo->filled |= STATION_INFO_TX_PACKETS; } @@ -2673,30 +2693,6 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif, return 0; } -void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable) -{ - int err; - - if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag))) - return; - - if (vif->nw_type != INFRA_NETWORK) - return; - - if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, - vif->ar->fw_capabilities)) - return; - - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n", - enable ? "enable" : "disable"); - - err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi, - vif->fw_vif_idx, enable); - if (err) - ath6kl_err("failed to %s enhanced bmiss detection: %d\n", - enable ? "enable" : "disable", err); -} - static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon, u8 *rsn_capab) { @@ -2776,9 +2772,11 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, ar->ap_mode_bkey.valid = false; - /* TODO: - * info->interval - */ + ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx, + info->beacon_interval); + + if (ret) + ath6kl_warn("Failed to set beacon interval: %d\n", ret); ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx, info->dtim_period); @@ -3492,8 +3490,8 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar) ath6kl_cfg80211_stop(vif); } -static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, - struct regulatory_request *request) +static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, + struct regulatory_request *request) { struct ath6kl *ar = wiphy_priv(wiphy); u32 rates[IEEE80211_NUM_BANDS]; @@ -3506,17 +3504,13 @@ static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, request->processed ? " processed" : "", request->initiator, request->user_reg_hint_type); - /* - * As firmware is not able intersect regdoms, we can only listen to - * cellular hints. - */ if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE) - return -EOPNOTSUPP; + return; ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2); if (ret) { ath6kl_err("failed to set regdomain: %d\n", ret); - return ret; + return; } /* @@ -3536,10 +3530,8 @@ static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy, if (ret) { ath6kl_err("failed to start scan for a regdomain change: %d\n", ret); - return ret; + return; } - - return 0; } static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif) @@ -3563,6 +3555,37 @@ static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif) return 0; } +void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready) +{ + static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + bool discon_issued; + + netif_stop_queue(vif->ndev); + + clear_bit(WLAN_ENABLED, &vif->flags); + + if (wmi_ready) { + discon_issued = test_bit(CONNECTED, &vif->flags) || + test_bit(CONNECT_PEND, &vif->flags); + ath6kl_disconnect(vif); + del_timer(&vif->disconnect_timer); + + if (discon_issued) + ath6kl_disconnect_event(vif, DISCONNECT_CMD, + (vif->nw_type & AP_NETWORK) ? 
+ bcast_mac : vif->bssid, + 0, NULL, 0); + } + + if (vif->scan_req) { + cfg80211_scan_done(vif->scan_req, true); + vif->scan_req = NULL; + } + + /* need to clean up enhanced bmiss detection fw state */ + ath6kl_cfg80211_sta_bmiss_enhance(vif, false); +} + void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif) { struct ath6kl *ar = vif->ar; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h index e5e70f3a8ca8..b59becd91aea 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.h +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h @@ -61,7 +61,5 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar); struct ath6kl *ath6kl_cfg80211_create(void); void ath6kl_cfg80211_destroy(struct ath6kl *ar); -/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */ -void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable); #endif /* ATH6KL_CFG80211_H */ diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index 189d8faf8c87..61b2f98b4e77 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -940,7 +940,7 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, bool wait_fot_compltn, bool cold_reset); void ath6kl_init_control_info(struct ath6kl_vif *vif); struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar); -void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready); +void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready); int ath6kl_init_hw_start(struct ath6kl *ar); int ath6kl_init_hw_stop(struct ath6kl *ar); int ath6kl_init_fetch_firmwares(struct ath6kl *ar); diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c index ba6bd497b787..281390178e3d 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c +++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c @@ -509,9 +509,7 @@ static void destroy_htc_txctrl_packet(struct htc_packet *packet) { struct sk_buff *skb; skb = packet->skb; - if (skb != NULL) - dev_kfree_skb(skb); - + dev_kfree_skb(skb); kfree(packet); } @@ -969,6 +967,22 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u16 payload_len; int status = 0; + /* + * ar->htc_target can be NULL due to a race condition that can occur + * during driver initialization(we do 'ath6kl_hif_power_on' before + * initializing 'ar->htc_target' via 'ath6kl_htc_create'). + * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as + * usb_complete_t/callback function for 'usb_fill_bulk_urb'. + * Thus the possibility of ar->htc_target being NULL + * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work. 
+ */ + if (WARN_ON_ONCE(!target)) { + ath6kl_err("Target not yet initialized\n"); + status = -EINVAL; + goto free_skb; + } + + netdata = skb->data; netlen = skb->len; @@ -1054,6 +1068,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, dev_kfree_skb(skb); skb = NULL; + goto free_skb; } @@ -1089,8 +1104,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, skb = NULL; free_skb: - if (skb != NULL) - dev_kfree_skb(skb); + dev_kfree_skb(skb); return status; @@ -1184,7 +1198,7 @@ static void reset_endpoint_states(struct htc_target *target) INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue); INIT_LIST_HEAD(&ep->rx_bufq); ep->target = target; - ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */ + ep->pipe.tx_credit_flow_enabled = true; } } diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index f21fa322e5ca..5d434cf88f35 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1715,38 +1715,6 @@ void ath6kl_init_hw_restart(struct ath6kl *ar) } } -/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */ -void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready) -{ - static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - bool discon_issued; - - netif_stop_queue(vif->ndev); - - clear_bit(WLAN_ENABLED, &vif->flags); - - if (wmi_ready) { - discon_issued = test_bit(CONNECTED, &vif->flags) || - test_bit(CONNECT_PEND, &vif->flags); - ath6kl_disconnect(vif); - del_timer(&vif->disconnect_timer); - - if (discon_issued) - ath6kl_disconnect_event(vif, DISCONNECT_CMD, - (vif->nw_type & AP_NETWORK) ? - bcast_mac : vif->bssid, - 0, NULL, 0); - } - - if (vif->scan_req) { - cfg80211_scan_done(vif->scan_req, true); - vif->scan_req = NULL; - } - - /* need to clean up enhanced bmiss detection fw state */ - ath6kl_cfg80211_sta_bmiss_enhance(vif, false); -} - void ath6kl_stop_txrx(struct ath6kl *ar) { struct ath6kl_vif *vif, *tmp_vif; @@ -1766,7 +1734,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar) list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) { list_del(&vif->list); spin_unlock_bh(&ar->list_lock); - ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag)); + ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); rtnl_lock(); ath6kl_cfg80211_vif_cleanup(vif); rtnl_unlock(); @@ -1801,8 +1769,6 @@ void ath6kl_stop_txrx(struct ath6kl *ar) "attempting to reset target on instance destroy\n"); ath6kl_reset_device(ar, ar->target_type, true, true); - clear_bit(WLAN_ENABLED, &ar->flag); - up(&ar->sem); } EXPORT_SYMBOL(ath6kl_stop_txrx); diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 62bcc0d5bc23..5fcd342762de 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -159,10 +159,8 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe, static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context) { - if (urb_context->skb != NULL) { - dev_kfree_skb(urb_context->skb); - urb_context->skb = NULL; - } + dev_kfree_skb(urb_context->skb); + urb_context->skb = NULL; ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context); } diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 998f8b0f62fd..d76b5bd81a0d 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -751,6 +751,23 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 
*bssid) NO_SYNC_WMIFLAG); } +int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx, + u32 beacon_intvl) +{ + struct sk_buff *skb; + struct set_beacon_int_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct set_beacon_int_cmd *) skb->data; + + cmd->beacon_intvl = cpu_to_le32(beacon_intvl); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, + WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG); +} + int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period) { struct sk_buff *skb; @@ -1108,7 +1125,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, kfree(mgmt); if (bss == NULL) return -ENOMEM; - cfg80211_put_bss(bss); + cfg80211_put_bss(ar->wiphy, bss); /* * Firmware doesn't return any event when scheduled scan has @@ -2480,16 +2497,11 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) free_cmd_skb: /* free up any resources left over (possibly due to an error) */ - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); free_data_skb: - for (index = 0; index < num_pri_streams; index++) { - if (data_sync_bufs[index].skb != NULL) { - dev_kfree_skb((struct sk_buff *)data_sync_bufs[index]. - skb); - } - } + for (index = 0; index < num_pri_streams; index++) + dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb); return ret; } diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h index 98b1755e67f4..b5f226503baf 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.h +++ b/drivers/net/wireless/ath/ath6kl/wmi.h @@ -1660,6 +1660,10 @@ struct roam_ctrl_cmd { u8 roam_ctrl; } __packed; +struct set_beacon_int_cmd { + __le32 beacon_intvl; +} __packed; + struct set_dtim_cmd { __le32 dtim_period; } __packed; @@ -2649,6 +2653,8 @@ int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi); int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period); +int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx, + u32 beacon_interval); int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid); int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode); int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on); diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 7647ed6b73d7..17507dc8a1e7 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -58,6 +58,7 @@ config ATH9K_DEBUGFS bool "Atheros ath9k debugging" depends on ATH9K select MAC80211_DEBUGFS + select RELAY ---help--- Say Y, if you need access to ath9k's statistics for interrupts, rate control, etc. 
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 3a69804f4c16..d1ff3c246a12 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -86,29 +86,25 @@ static int ath_ahb_probe(struct platform_device *pdev) if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "no platform data specified\n"); - ret = -EINVAL; - goto err_out; + return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource found\n"); - ret = -ENXIO; - goto err_out; + return -ENXIO; } - mem = ioremap_nocache(res->start, resource_size(res)); + mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); - ret = -ENOMEM; - goto err_out; + return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(&pdev->dev, "no IRQ resource found\n"); - ret = -ENXIO; - goto err_iounmap; + return -ENXIO; } irq = res->start; @@ -116,8 +112,7 @@ static int ath_ahb_probe(struct platform_device *pdev) hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); if (hw == NULL) { dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); - ret = -ENOMEM; - goto err_iounmap; + return -ENOMEM; } SET_IEEE80211_DEV(hw, &pdev->dev); @@ -156,9 +151,6 @@ static int ath_ahb_probe(struct platform_device *pdev) err_free_hw: ieee80211_free_hw(hw); platform_set_drvdata(pdev, NULL); - err_iounmap: - iounmap(mem); - err_out: return ret; } @@ -168,12 +160,10 @@ static int ath_ahb_remove(struct platform_device *pdev) if (hw) { struct ath_softc *sc = hw->priv; - void __iomem *mem = sc->mem; ath9k_deinit_device(sc); free_irq(sc->irq, sc); ieee80211_free_hw(sc->hw); - iounmap(mem); platform_set_drvdata(pdev, NULL); } diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index e09ec40ce71a..7ecd40f07a74 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -152,7 +152,8 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", aniState->ofdmNoiseImmunityLevel, immunityLevel, BEACON_RSSI(ah), - aniState->rssiThrLow, aniState->rssiThrHigh); + ATH9K_ANI_RSSI_THR_LOW, + ATH9K_ANI_RSSI_THR_HIGH); if (!scan) aniState->ofdmNoiseImmunityLevel = immunityLevel; @@ -173,7 +174,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, weak_sig = entry_ofdm->ofdm_weak_signal_on; if (ah->opmode == NL80211_IFTYPE_STATION && - BEACON_RSSI(ah) <= aniState->rssiThrHigh) + BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH) weak_sig = true; if (aniState->ofdmWeakSigDetect != weak_sig) @@ -216,11 +217,11 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel, ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", aniState->cckNoiseImmunityLevel, immunityLevel, - BEACON_RSSI(ah), aniState->rssiThrLow, - aniState->rssiThrHigh); + BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW, + ATH9K_ANI_RSSI_THR_HIGH); if (ah->opmode == NL80211_IFTYPE_STATION && - BEACON_RSSI(ah) <= aniState->rssiThrLow && + BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW && immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; @@ -418,9 +419,6 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) return; aniState = &ah->curchan->ani; - if (WARN_ON(!aniState)) - return; - if (!ath9k_hw_ani_read_counters(ah)) 
return; @@ -489,23 +487,6 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); -void ath9k_hw_ani_setup(struct ath_hw *ah) -{ - int i; - - static const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; - static const int coarseHigh[] = { -14, -14, -14, -14, -12 }; - static const int coarseLow[] = { -64, -64, -64, -64, -70 }; - static const int firpwr[] = { -78, -78, -78, -78, -80 }; - - for (i = 0; i < 5; i++) { - ah->totalSizeDesired[i] = totalSizeDesired[i]; - ah->coarse_high[i] = coarseHigh[i]; - ah->coarse_low[i] = coarseLow[i]; - ah->firpwr[i] = firpwr[i]; - } -} - void ath9k_hw_ani_init(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); @@ -531,8 +512,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah) ani->ofdmsTurn = true; - ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; - ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index 1485bf5e3518..dddb1361039a 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -104,7 +104,6 @@ struct ath9k_ani_default { }; struct ar5416AniState { - struct ath9k_channel *c; u8 noiseImmunityLevel; u8 ofdmNoiseImmunityLevel; u8 cckNoiseImmunityLevel; @@ -113,15 +112,9 @@ struct ar5416AniState { u8 spurImmunityLevel; u8 firstepLevel; u8 ofdmWeakSigDetect; - u8 cckWeakSigThreshold; u32 listenTime; - int32_t rssiThrLow; - int32_t rssiThrHigh; u32 ofdmPhyErrCount; u32 cckPhyErrCount; - int16_t pktRssi[2]; - int16_t ofdmErrRssi[2]; - int16_t cckErrRssi[2]; struct ath9k_ani_default iniDef; }; @@ -147,7 +140,6 @@ struct ar5416Stats { void ath9k_enable_mib_counters(struct ath_hw *ah); void ath9k_hw_disable_mib_counters(struct ath_hw *ah); -void ath9k_hw_ani_setup(struct ath_hw *ah); void ath9k_hw_ani_init(struct ath_hw *ah); #endif /* ANI_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h index f81e7fc60a36..467ccfae2cee 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h @@ -466,7 +466,7 @@ static const u32 ar5416Bank0[][2] = { }; static const u32 ar5416BB_RfGain[][3] = { - /* Addr 5G_HT20 5G_HT40 */ + /* Addr 5G 2G */ {0x00009a00, 0x00000000, 0x00000000}, {0x00009a04, 0x00000040, 0x00000040}, {0x00009a08, 0x00000080, 0x00000080}, @@ -546,12 +546,12 @@ static const u32 ar5416Bank2[][2] = { }; static const u32 ar5416Bank3[][3] = { - /* Addr 5G_HT20 5G_HT40 */ + /* Addr 5G 2G */ {0x000098f0, 0x01400018, 0x01c00018}, }; static const u32 ar5416Bank6[][3] = { - /* Addr 5G_HT20 5G_HT40 */ + /* Addr 5G 2G */ {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, @@ -588,7 +588,7 @@ static const u32 ar5416Bank6[][3] = { }; static const u32 ar5416Bank6TPC[][3] = { - /* Addr 5G_HT20 5G_HT40 */ + /* Addr 5G 2G */ {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 874186bfda41..fd69376ecc83 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -470,16 +470,15 @@ static void ar5008_hw_spur_mitigate(struct ath_hw 
*ah, static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah) { #define ATH_ALLOC_BANK(bank, size) do { \ - bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \ - if (!bank) { \ - ath_err(common, "Cannot allocate RF banks\n"); \ - return -ENOMEM; \ - } \ + bank = devm_kzalloc(ah->dev, sizeof(u32) * size, GFP_KERNEL); \ + if (!bank) \ + goto error; \ } while (0); struct ath_common *common = ath9k_hw_common(ah); - BUG_ON(AR_SREV_9280_20_OR_LATER(ah)); + if (AR_SREV_9280_20_OR_LATER(ah)) + return 0; ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows); ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows); @@ -492,35 +491,12 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah) return 0; #undef ATH_ALLOC_BANK +error: + ath_err(common, "Cannot allocate RF banks\n"); + return -ENOMEM; } -/** - * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers - * @ah: atheros hardware struture - * For the external AR2133/AR5133 radios banks. - */ -static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah) -{ -#define ATH_FREE_BANK(bank) do { \ - kfree(bank); \ - bank = NULL; \ - } while (0); - - BUG_ON(AR_SREV_9280_20_OR_LATER(ah)); - - ATH_FREE_BANK(ah->analogBank0Data); - ATH_FREE_BANK(ah->analogBank1Data); - ATH_FREE_BANK(ah->analogBank2Data); - ATH_FREE_BANK(ah->analogBank3Data); - ATH_FREE_BANK(ah->analogBank6Data); - ATH_FREE_BANK(ah->analogBank6TPCData); - ATH_FREE_BANK(ah->analogBank7Data); - ATH_FREE_BANK(ah->bank6Temp); - -#undef ATH_FREE_BANK -} - /* * * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM * @ah: atheros hardware structure @@ -1380,7 +1356,7 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah) conf->radar_inband = 8; } -void ar5008_hw_attach_phy_ops(struct ath_hw *ah) +int ar5008_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); static const u32 ar5416_cca_regs[6] = { @@ -1391,12 +1367,15 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah) AR_PHY_CH1_EXT_CCA, AR_PHY_CH2_EXT_CCA }; + int ret; + + ret = ar5008_hw_rf_alloc_ext_banks(ah); + if (ret) + return ret; priv_ops->rf_set_freq = ar5008_hw_set_channel; priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate; - priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks; - priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks; priv_ops->set_rf_regs = ar5008_hw_set_rf_regs; priv_ops->set_channel_regs = ar5008_hw_set_channel_regs; priv_ops->init_bb = ar5008_hw_init_bb; @@ -1421,4 +1400,5 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah) ar5008_hw_set_nf_limits(ah); ar5008_hw_set_radar_conf(ah); memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs)); + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h index ea4a230997ac..59524e1d4678 100644 --- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h @@ -460,7 +460,7 @@ static const u32 ar5416Common_9100[][2] = { }; static const u32 ar5416Bank6_9100[][3] = { - /* Addr 5G_HT20 5G_HT40 */ + /* Addr 5G 2G */ {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, @@ -497,7 +497,7 @@ static const u32 ar5416Bank6_9100[][3] = { }; static const u32 ar5416Bank6TPC_9100[][3] = { - /* Addr 5G_HT20 5G_HT40 */ + /* Addr 5G 2G */ {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, {0x0000989c, 0x00000000, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c 
b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index 648da3e885e9..f053d978540e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -23,13 +23,13 @@ /* General hardware code for the A5008/AR9001/AR9002 hadware families */ -static void ar9002_hw_init_mode_regs(struct ath_hw *ah) +static int ar9002_hw_init_mode_regs(struct ath_hw *ah) { if (AR_SREV_9271(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271); INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271); INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg); - return; + return 0; } if (ah->config.pcie_clock_req) @@ -102,9 +102,9 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah) u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns; u32 *data; - data = kmalloc(size, GFP_KERNEL); + data = devm_kzalloc(ah->dev, size, GFP_KERNEL); if (!data) - return; + return -ENOMEM; memcpy(data, addac->ia_array, size); addac->ia_array = data; @@ -120,6 +120,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniCckfirJapan2484, ar9287Common_japan_2484_cck_fir_coeff_9287_1_1); } + return 0; } static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah) @@ -409,22 +410,30 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah) } /* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */ -void ar9002_hw_attach_ops(struct ath_hw *ah) +int ar9002_hw_attach_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); struct ath_hw_ops *ops = ath9k_hw_ops(ah); + int ret; + + ret = ar9002_hw_init_mode_regs(ah); + if (ret) + return ret; - priv_ops->init_mode_regs = ar9002_hw_init_mode_regs; priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs; ops->config_pci_powersave = ar9002_hw_configpcipowersave; - ar5008_hw_attach_phy_ops(ah); + ret = ar5008_hw_attach_phy_ops(ah); + if (ret) + return ret; + if (AR_SREV_9280_20_OR_LATER(ah)) ar9002_hw_attach_phy_ops(ah); ar9002_hw_attach_calib_ops(ah); ar9002_hw_attach_mac_ops(ah); + return 0; } void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan) diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 846dd7974eb8..f4003512d8d5 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -555,14 +555,73 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); } +static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, + struct ath_spec_scan *param) +{ + u8 count; + + if (!param->enabled) { + REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_ENABLE); + return; + } + REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA); + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE); + + if (param->short_repeat) + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT); + else + REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT); + + /* on AR92xx, the highest bit of count will make the the chip send + * spectral samples endlessly. Check if this really was intended, + * and fix otherwise. 
+ */ + count = param->count; + if (param->endless) + count = 0x80; + else if (count & 0x80) + count = 0x7f; + + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_COUNT, count); + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_PERIOD, param->period); + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period); + + return; +} + +static void ar9002_hw_spectral_scan_trigger(struct ath_hw *ah) +{ + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE); + /* Activate spectral scan */ + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_ACTIVE); +} + +static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + /* Poll for spectral scan complete */ + if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_ACTIVE, + 0, AH_WAIT_TIMEOUT)) { + ath_err(common, "spectral scan wait failed\n"); + return; + } +} + void ar9002_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); struct ath_hw_ops *ops = ath9k_hw_ops(ah); priv_ops->set_rf_regs = NULL; - priv_ops->rf_alloc_ext_banks = NULL; - priv_ops->rf_free_ext_banks = NULL; priv_ops->rf_set_freq = ar9002_hw_set_channel; priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate; priv_ops->olc_init = ar9002_olc_init; @@ -571,6 +630,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah) ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get; ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set; + ops->spectral_scan_config = ar9002_hw_spectral_scan_config; + ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger; + ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait; ar9002_hw_set_nf_limits(ah); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 262e1e036fd7..db5ffada2217 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -744,6 +744,186 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = { {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, }; +static const u32 ar9300Modes_mixed_ob_db_tx_gain_table_2p2[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, + {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, + {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, + {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, + {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400}, + {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402}, + {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404}, + {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 
0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640}, + {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660}, + {0x0000a544, 0x52022470, 0x52022470, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x55022490, 0x55022490, 0x3e001a81, 0x3e001a81}, + {0x0000a54c, 0x59022492, 0x59022492, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5d022692, 0x5d022692, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61022892, 0x61022892, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x65024890, 0x65024890, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x69024892, 0x69024892, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6e024c92, 0x6e024c92, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec}, + {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, + {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, + {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, + {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400}, + {0x0000a598, 0x21802220, 0x21802220, 0x15800402, 0x15800402}, + {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04}, + {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640}, + {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x52822470, 0x52822470, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x55822490, 0x55822490, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59822492, 0x59822492, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5d822692, 0x5d822692, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61822892, 0x61822892, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x65824890, 0x65824890, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x69824892, 0x69824892, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec}, + {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a614, 0x02004000, 0x02004000, 
0x01404000, 0x01404000}, + {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501}, + {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501}, + {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03}, + {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04}, + {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04}, + {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, + {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, + {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, + {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, + {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, + {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, + {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, + {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, + {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, + {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x00016044, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4}, + {0x00016048, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001}, + {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016444, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4}, + {0x00016448, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001}, + {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016844, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4}, + {0x00016848, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001}, + {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, +}; + +static const u32 ar9300Modes_type5_tx_gain_table_2p2[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, + {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, + {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, + {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, + {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400}, + {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402}, + {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404}, + {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603}, + {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02}, + {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04}, + {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20}, + {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20}, + {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22}, + {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24}, + {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640}, + {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660}, + {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861}, + {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81}, + {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83}, + {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84}, + {0x0000a554, 
0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3}, + {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5}, + {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9}, + {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb}, + {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, + {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, + {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, + {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000}, + {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501}, + {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501}, + {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03}, + {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04}, + {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04}, + {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, + {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, + {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, + {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, + {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001}, + {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001}, + {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001}, + {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, +}; + static const u32 ar9300Common_rx_gain_table_2p2[][2] = { /* Addr allmodes */ {0x0000a000, 0x00010000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 56317b0fb6b6..4cc13940c895 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -32,7 +32,6 @@ struct coeff { enum ar9003_cal_types { IQ_MISMATCH_CAL = BIT(0), - TEMP_COMP_CAL = BIT(1), }; static void ar9003_hw_setup_calibration(struct ath_hw *ah, @@ -49,7 +48,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, */ REG_RMW_FIELD(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX, - currCal->calData->calCountMax); + currCal->calData->calCountMax); 
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); ath_dbg(common, CALIBRATE, @@ -58,14 +57,8 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, /* Kick-off cal */ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL); break; - case TEMP_COMP_CAL: - REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM, - AR_PHY_65NM_CH0_THERM_LOCAL, 1); - REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM, - AR_PHY_65NM_CH0_THERM_START, 1); - - ath_dbg(common, CALIBRATE, - "starting Temperature Compensation Calibration\n"); + default: + ath_err(common, "Invalid calibration type\n"); break; } } @@ -323,6 +316,14 @@ static const struct ath9k_percal_data iq_cal_single_sample = { static void ar9003_hw_init_cal_settings(struct ath_hw *ah) { ah->iq_caldata.calData = &iq_cal_single_sample; + + if (AR_SREV_9300_20_OR_LATER(ah)) { + ah->enabled_cals |= TX_IQ_CAL; + if (AR_SREV_9485_OR_LATER(ah) && !AR_SREV_9340(ah)) + ah->enabled_cals |= TX_IQ_ON_AGC_CAL; + } + + ah->supp_cals = IQ_MISMATCH_CAL; } /* @@ -959,22 +960,68 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0); } +static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + int i; + + if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah)) + return; + + for (i = 0; i < AR9300_MAX_CHAINS; i++) { + if (!(ah->rxchainmask & (1 << i))) + continue; + ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan)); + } +} + +static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable) +{ + u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0, + AR_PHY_CL_TAB_1, + AR_PHY_CL_TAB_2 }; + struct ath9k_hw_cal_data *caldata = ah->caldata; + bool txclcal_done = false; + int i, j; + + if (!caldata || !(ah->enabled_cals & TX_CL_CAL)) + return; + + txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & + AR_PHY_AGC_CONTROL_CLC_SUCCESS); + + if (caldata->done_txclcal_once) { + for (i = 0; i < AR9300_MAX_CHAINS; i++) { + if (!(ah->txchainmask & (1 << i))) + continue; + for (j = 0; j < MAX_CL_TAB_ENTRY; j++) + REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]), + caldata->tx_clcal[i][j]); + } + } else if (is_reusable && txclcal_done) { + for (i = 0; i < AR9300_MAX_CHAINS; i++) { + if (!(ah->txchainmask & (1 << i))) + continue; + for (j = 0; j < MAX_CL_TAB_ENTRY; j++) + caldata->tx_clcal[i][j] = + REG_READ(ah, CL_TAB_ENTRY(cl_idx[i])); + } + caldata->done_txclcal_once = true; + } +} + static bool ar9003_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_cal_data *caldata = ah->caldata; - bool txiqcal_done = false, txclcal_done = false; + bool txiqcal_done = false; bool is_reusable = true, status = true; - bool run_rtt_cal = false, run_agc_cal; + bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false; bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT); u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL | AR_PHY_AGC_CONTROL_FLTR_CAL | AR_PHY_AGC_CONTROL_PKDET_CAL; - int i, j; - u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0, - AR_PHY_CL_TAB_1, - AR_PHY_CL_TAB_2 }; ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); @@ -1014,7 +1061,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, } } - if (!(ah->enabled_cals & TX_IQ_CAL)) + if ((IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) || + !(ah->enabled_cals & TX_IQ_CAL)) goto skip_tx_iqcal; /* Do Tx IQ Calibration */ @@ -1034,21 +1082,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0, 
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); txiqcal_done = run_agc_cal = true; - goto skip_tx_iqcal; - } else if (caldata && !caldata->done_txiqcal_once) + } else if (caldata && !caldata->done_txiqcal_once) { run_agc_cal = true; + sep_iq_cal = true; + } +skip_tx_iqcal: if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal) ar9003_mci_init_cal_req(ah, &is_reusable); - if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) { + if (sep_iq_cal) { txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); udelay(5); REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); } -skip_tx_iqcal: if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { /* Calibrate the AGC */ REG_WRITE(ah, AR_PHY_AGC_CONTROL, @@ -1059,14 +1108,8 @@ skip_tx_iqcal: status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { - for (i = 0; i < AR9300_MAX_CHAINS; i++) { - if (!(ah->rxchainmask & (1 << i))) - continue; - ar9003_hw_manual_peak_cal(ah, i, - IS_CHAN_2GHZ(chan)); - } - } + + ar9003_hw_do_manual_peak_cal(ah, chan); } if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal) @@ -1091,31 +1134,7 @@ skip_tx_iqcal: else if (caldata && caldata->done_txiqcal_once) ar9003_hw_tx_iq_cal_reload(ah); -#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j)) - if (caldata && (ah->enabled_cals & TX_CL_CAL)) { - txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & - AR_PHY_AGC_CONTROL_CLC_SUCCESS); - if (caldata->done_txclcal_once) { - for (i = 0; i < AR9300_MAX_CHAINS; i++) { - if (!(ah->txchainmask & (1 << i))) - continue; - for (j = 0; j < MAX_CL_TAB_ENTRY; j++) - REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]), - caldata->tx_clcal[i][j]); - } - } else if (is_reusable && txclcal_done) { - for (i = 0; i < AR9300_MAX_CHAINS; i++) { - if (!(ah->txchainmask & (1 << i))) - continue; - for (j = 0; j < MAX_CL_TAB_ENTRY; j++) - caldata->tx_clcal[i][j] = - REG_READ(ah, - CL_TAB_ENTRY(cl_idx[i])); - } - caldata->done_txclcal_once = true; - } - } -#undef CL_TAB_ENTRY + ar9003_hw_cl_cal_post_proc(ah, is_reusable); if (run_rtt_cal && caldata) { if (is_reusable) { @@ -1133,20 +1152,10 @@ skip_tx_iqcal: /* Initialize list pointers */ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; - ah->supp_cals = IQ_MISMATCH_CAL; - - if (ah->supp_cals & IQ_MISMATCH_CAL) { - INIT_CAL(&ah->iq_caldata); - INSERT_CAL(ah, &ah->iq_caldata); - ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); - } - if (ah->supp_cals & TEMP_COMP_CAL) { - INIT_CAL(&ah->tempCompCalData); - INSERT_CAL(ah, &ah->tempCompCalData); - ath_dbg(common, CALIBRATE, - "enabling Temperature Compensation Calibration\n"); - } + INIT_CAL(&ah->iq_caldata); + INSERT_CAL(ah, &ah->iq_caldata); + ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); /* Initialize current pointer to first element in list */ ah->cal_list_curr = ah->cal_list; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 562186ca9b52..881e989ea470 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4586,14 +4586,14 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, return 0; } -static int ar9003_hw_power_control_override(struct ath_hw *ah, - int frequency, - int *correction, - int *voltage, int *temperature) +static void ar9003_hw_power_control_override(struct ath_hw *ah, + int frequency, + int *correction, + int *voltage, int *temperature) { - int tempSlope = 0; 
+ int temp_slope = 0, temp_slope1 = 0, temp_slope2 = 0; struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - int f[8], t[8], i; + int f[8], t[8], t1[3], t2[3], i; REG_RMW(ah, AR_PHY_TPC_11_B0, (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), @@ -4624,38 +4624,108 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah, * enable temperature compensation * Need to use register names */ - if (frequency < 4000) - tempSlope = eep->modalHeader2G.tempSlope; - else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) { - for (i = 0; i < 8; i++) { - t[i] = eep->base_ext1.tempslopextension[i]; - f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0); + if (frequency < 4000) { + temp_slope = eep->modalHeader2G.tempSlope; + } else { + if (AR_SREV_9550(ah)) { + t[0] = eep->base_ext1.tempslopextension[2]; + t1[0] = eep->base_ext1.tempslopextension[3]; + t2[0] = eep->base_ext1.tempslopextension[4]; + f[0] = 5180; + + t[1] = eep->modalHeader5G.tempSlope; + t1[1] = eep->base_ext1.tempslopextension[0]; + t2[1] = eep->base_ext1.tempslopextension[1]; + f[1] = 5500; + + t[2] = eep->base_ext1.tempslopextension[5]; + t1[2] = eep->base_ext1.tempslopextension[6]; + t2[2] = eep->base_ext1.tempslopextension[7]; + f[2] = 5785; + + temp_slope = ar9003_hw_power_interpolate(frequency, + f, t, 3); + temp_slope1 = ar9003_hw_power_interpolate(frequency, + f, t1, 3); + temp_slope2 = ar9003_hw_power_interpolate(frequency, + f, t2, 3); + + goto tempslope; } - tempSlope = ar9003_hw_power_interpolate((s32) frequency, - f, t, 8); - } else if (eep->base_ext2.tempSlopeLow != 0) { - t[0] = eep->base_ext2.tempSlopeLow; - f[0] = 5180; - t[1] = eep->modalHeader5G.tempSlope; - f[1] = 5500; - t[2] = eep->base_ext2.tempSlopeHigh; - f[2] = 5785; - tempSlope = ar9003_hw_power_interpolate((s32) frequency, - f, t, 3); - } else - tempSlope = eep->modalHeader5G.tempSlope; - REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope); + if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) { + for (i = 0; i < 8; i++) { + t[i] = eep->base_ext1.tempslopextension[i]; + f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0); + } + temp_slope = ar9003_hw_power_interpolate((s32) frequency, + f, t, 8); + } else if (eep->base_ext2.tempSlopeLow != 0) { + t[0] = eep->base_ext2.tempSlopeLow; + f[0] = 5180; + t[1] = eep->modalHeader5G.tempSlope; + f[1] = 5500; + t[2] = eep->base_ext2.tempSlopeHigh; + f[2] = 5785; + temp_slope = ar9003_hw_power_interpolate((s32) frequency, + f, t, 3); + } else { + temp_slope = eep->modalHeader5G.tempSlope; + } + } + +tempslope: + if (AR_SREV_9550(ah)) { + /* + * AR955x has tempSlope register for each chain. + * Check whether temp_compensation feature is enabled or not. + */ + if (eep->baseEepHeader.featureEnable & 0x1) { + if (frequency < 4000) { + REG_RMW_FIELD(ah, AR_PHY_TPC_19, + AR_PHY_TPC_19_ALPHA_THERM, + eep->base_ext2.tempSlopeLow); + REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, + AR_PHY_TPC_19_ALPHA_THERM, + temp_slope); + REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2, + AR_PHY_TPC_19_ALPHA_THERM, + eep->base_ext2.tempSlopeHigh); + } else { + REG_RMW_FIELD(ah, AR_PHY_TPC_19, + AR_PHY_TPC_19_ALPHA_THERM, + temp_slope); + REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, + AR_PHY_TPC_19_ALPHA_THERM, + temp_slope1); + REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2, + AR_PHY_TPC_19_ALPHA_THERM, + temp_slope2); + } + } else { + /* + * If temp compensation is not enabled, + * set all registers to 0. 
+ */ + REG_RMW_FIELD(ah, AR_PHY_TPC_19, + AR_PHY_TPC_19_ALPHA_THERM, 0); + REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, + AR_PHY_TPC_19_ALPHA_THERM, 0); + REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2, + AR_PHY_TPC_19_ALPHA_THERM, 0); + } + } else { + REG_RMW_FIELD(ah, AR_PHY_TPC_19, + AR_PHY_TPC_19_ALPHA_THERM, temp_slope); + } if (AR_SREV_9462_20(ah)) REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, - AR_PHY_TPC_19_B1_ALPHA_THERM, tempSlope); + AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope); REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE, temperature[0]); - - return 0; } /* Apply the recorded correction values. */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 59bf5f31e212..a3523c969a3a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -507,28 +507,59 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah) else if (AR_SREV_9580(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9580_1p0_mixed_ob_db_tx_gain_table); + else + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_mixed_ob_db_tx_gain_table_2p2); +} + +static void ar9003_tx_gain_table_mode5(struct ath_hw *ah) +{ + if (AR_SREV_9485_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9485Modes_green_ob_db_tx_gain_1_1); + else if (AR_SREV_9340(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9340Modes_ub124_tx_gain_table_1p0); + else if (AR_SREV_9580(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9580_1p0_type5_tx_gain_table); + else if (AR_SREV_9300_22(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_type5_tx_gain_table_2p2); } +static void ar9003_tx_gain_table_mode6(struct ath_hw *ah) +{ + if (AR_SREV_9340(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0); + else if (AR_SREV_9485_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9485Modes_green_spur_ob_db_tx_gain_1_1); + else if (AR_SREV_9580(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9580_1p0_type6_tx_gain_table); +} + +typedef void (*ath_txgain_tab)(struct ath_hw *ah); + static void ar9003_tx_gain_table_apply(struct ath_hw *ah) { - switch (ar9003_hw_get_tx_gain_idx(ah)) { - case 0: - default: - ar9003_tx_gain_table_mode0(ah); - break; - case 1: - ar9003_tx_gain_table_mode1(ah); - break; - case 2: - ar9003_tx_gain_table_mode2(ah); - break; - case 3: - ar9003_tx_gain_table_mode3(ah); - break; - case 4: - ar9003_tx_gain_table_mode4(ah); - break; - } + static const ath_txgain_tab modes[] = { + ar9003_tx_gain_table_mode0, + ar9003_tx_gain_table_mode1, + ar9003_tx_gain_table_mode2, + ar9003_tx_gain_table_mode3, + ar9003_tx_gain_table_mode4, + ar9003_tx_gain_table_mode5, + ar9003_tx_gain_table_mode6, + }; + int idx = ar9003_hw_get_tx_gain_idx(ah); + + if (idx >= ARRAY_SIZE(modes)) + idx = 0; + + modes[idx](ah); } static void ar9003_rx_gain_table_mode0(struct ath_hw *ah) @@ -673,7 +704,7 @@ void ar9003_hw_attach_ops(struct ath_hw *ah) struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); struct ath_hw_ops *ops = ath9k_hw_ops(ah); - priv_ops->init_mode_regs = ar9003_hw_init_mode_regs; + ar9003_hw_init_mode_regs(ah); priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs; ops->config_pci_powersave = ar9003_hw_configpcipowersave; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 3afc24bde6d6..2bf6548dd143 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -68,7 +68,7 @@ static const int m2ThreshExt_off = 127; 
static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) { u16 bMode, fracMode = 0, aModeRefSel = 0; - u32 freq, channelSel = 0, reg32 = 0; + u32 freq, chan_frac, div, channelSel = 0, reg32 = 0; struct chan_centers centers; int loadSynthChannel; @@ -77,9 +77,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) if (freq < 4800) { /* 2 GHz, fractional mode */ if (AR_SREV_9330(ah)) { - u32 chan_frac; - u32 div; - if (ah->is_clk_25mhz) div = 75; else @@ -89,34 +86,40 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) chan_frac = (((freq * 4) % div) * 0x20000) / div; channelSel = (channelSel << 17) | chan_frac; } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) { - u32 chan_frac; - /* - * freq_ref = 40 / (refdiva >> amoderefsel); where refdiva=1 and amoderefsel=0 + * freq_ref = 40 / (refdiva >> amoderefsel); + * where refdiva=1 and amoderefsel=0 * ndiv = ((chan_mhz * 4) / 3) / freq_ref; * chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000 */ channelSel = (freq * 4) / 120; chan_frac = (((freq * 4) % 120) * 0x20000) / 120; channelSel = (channelSel << 17) | chan_frac; - } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { + } else if (AR_SREV_9340(ah)) { if (ah->is_clk_25mhz) { - u32 chan_frac; - channelSel = (freq * 2) / 75; chan_frac = (((freq * 2) % 75) * 0x20000) / 75; channelSel = (channelSel << 17) | chan_frac; - } else + } else { channelSel = CHANSEL_2G(freq) >> 1; - } else + } + } else if (AR_SREV_9550(ah)) { + if (ah->is_clk_25mhz) + div = 75; + else + div = 120; + + channelSel = (freq * 4) / div; + chan_frac = (((freq * 4) % div) * 0x20000) / div; + channelSel = (channelSel << 17) | chan_frac; + } else { channelSel = CHANSEL_2G(freq); + } /* Set to 2G mode */ bMode = 1; } else { if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) && ah->is_clk_25mhz) { - u32 chan_frac; - channelSel = freq / 75; chan_frac = ((freq % 75) * 0x20000) / 75; channelSel = (channelSel << 17) | chan_frac; @@ -1437,6 +1440,67 @@ set_rfmode: return 0; } +static void ar9003_hw_spectral_scan_config(struct ath_hw *ah, + struct ath_spec_scan *param) +{ + u8 count; + + if (!param->enabled) { + REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_ENABLE); + return; + } + + REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA); + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE); + + /* on AR93xx and newer, count = 0 will make the the chip send + * spectral samples endlessly. Check if this really was intended, + * and fix otherwise. 
+ */ + count = param->count; + if (param->endless) + count = 0; + else if (param->count == 0) + count = 1; + + if (param->short_repeat) + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT); + else + REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT); + + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_COUNT, count); + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_PERIOD, param->period); + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period); + + return; +} + +static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah) +{ + /* Activate spectral scan */ + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_ACTIVE); +} + +static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + /* Poll for spectral scan complete */ + if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_ACTIVE, + 0, AH_WAIT_TIMEOUT)) { + ath_err(common, "spectral scan wait failed\n"); + return; + } +} + void ar9003_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); @@ -1470,6 +1534,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah) ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get; ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set; ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv; + ops->spectral_scan_config = ar9003_hw_spectral_scan_config; + ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger; + ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait; ar9003_hw_set_nf_limits(ah); ar9003_hw_set_radar_conf(ah); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 107956298488..e71774196c01 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -1028,7 +1028,7 @@ #define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208) #define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c) #define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220) -#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240) +#define AR_PHY_TPC_19_B2 (AR_SM2_BASE + 0x240) #define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c) #define AR_PHY_TX_IQCAL_CORR_COEFF_B2(_i) (AR_SM2_BASE + 0x450 + ((_i) << 2)) diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h index f69d292bdc02..25db9215985a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h @@ -1172,6 +1172,106 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = { {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266}, }; +static const u32 ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a}, + {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac}, + {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00}, + {0x0000a2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000}, + {0x0000a394, 0x00000444, 0x00000444, 0x00000404, 0x00000404}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a504, 0x06000003, 0x06000003, 0x02000001, 0x02000001}, + {0x0000a508, 0x0a000020, 0x0a000020, 0x05000003, 0x05000003}, + {0x0000a50c, 0x10000023, 0x10000023, 0x0a000005, 0x0a000005}, + {0x0000a510, 0x16000220, 
0x16000220, 0x0e000201, 0x0e000201}, + {0x0000a514, 0x1c000223, 0x1c000223, 0x11000203, 0x11000203}, + {0x0000a518, 0x21002220, 0x21002220, 0x14000401, 0x14000401}, + {0x0000a51c, 0x27002223, 0x27002223, 0x18000403, 0x18000403}, + {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000602, 0x1b000602}, + {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000802, 0x1f000802}, + {0x0000a528, 0x34022225, 0x34022225, 0x21000620, 0x21000620}, + {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x25000820, 0x25000820}, + {0x0000a530, 0x3e02222c, 0x3e02222c, 0x29000822, 0x29000822}, + {0x0000a534, 0x4202242a, 0x4202242a, 0x2d000824, 0x2d000824}, + {0x0000a538, 0x4702244a, 0x4702244a, 0x30000828, 0x30000828}, + {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x3400082a, 0x3400082a}, + {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38000849, 0x38000849}, + {0x0000a544, 0x5302266c, 0x5302266c, 0x3b000a2c, 0x3b000a2c}, + {0x0000a548, 0x5702286c, 0x5702286c, 0x3e000e2b, 0x3e000e2b}, + {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42000e2d, 0x42000e2d}, + {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4500124a, 0x4500124a}, + {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4900124c, 0x4900124c}, + {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c00126c, 0x4c00126c}, + {0x0000a55c, 0x7002708c, 0x7002708c, 0x4f00128c, 0x4f00128c}, + {0x0000a560, 0x7302b08a, 0x7302b08a, 0x52001290, 0x52001290}, + {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292}, + {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292}, + {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292}, + {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292}, + {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292}, + {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292}, + {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292}, + {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, + {0x0000a584, 0x06800003, 0x06800003, 0x02800001, 0x02800001}, + {0x0000a588, 0x0a800020, 0x0a800020, 0x05800003, 0x05800003}, + {0x0000a58c, 0x10800023, 0x10800023, 0x0a800005, 0x0a800005}, + {0x0000a590, 0x16800220, 0x16800220, 0x0e800201, 0x0e800201}, + {0x0000a594, 0x1c800223, 0x1c800223, 0x11800203, 0x11800203}, + {0x0000a598, 0x21820220, 0x21820220, 0x14800401, 0x14800401}, + {0x0000a59c, 0x27820223, 0x27820223, 0x18800403, 0x18800403}, + {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800602, 0x1b800602}, + {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800802, 0x1f800802}, + {0x0000a5a8, 0x34822225, 0x34822225, 0x21800620, 0x21800620}, + {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x25800820, 0x25800820}, + {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x29800822, 0x29800822}, + {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2d800824, 0x2d800824}, + {0x0000a5b8, 0x4782244a, 0x4782244a, 0x30800828, 0x30800828}, + {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x3480082a, 0x3480082a}, + {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38800849, 0x38800849}, + {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b800a2c, 0x3b800a2c}, + {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e800e2b, 0x3e800e2b}, + {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42800e2d, 0x42800e2d}, + {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4580124a, 0x4580124a}, + {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4980124c, 0x4980124c}, + {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c80126c, 0x4c80126c}, + {0x0000a5dc, 0x7086308c, 0x7086308c, 0x4f80128c, 0x4f80128c}, + {0x0000a5e0, 0x738a308a, 0x738a308a, 0x52801290, 0x52801290}, + {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292}, + {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292}, + {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801292, 
0x56801292}, + {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292}, + {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292}, + {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292}, + {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292}, + {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a614, 0x01404000, 0x01404000, 0x01404501, 0x01404501}, + {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501}, + {0x0000a61c, 0x02008802, 0x02008802, 0x01404501, 0x01404501}, + {0x0000a620, 0x0300cc03, 0x0300cc03, 0x03c0cf02, 0x03c0cf02}, + {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03c0cf03, 0x03c0cf03}, + {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04011004, 0x04011004}, + {0x0000a62c, 0x03810c03, 0x03810c03, 0x05419405, 0x05419405}, + {0x0000a630, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506}, + {0x0000a634, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506}, + {0x0000a638, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506}, + {0x0000a63c, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506}, + {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a}, + {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac}, + {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00}, + {0x0000b2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000}, + {0x00016044, 0x022492db, 0x022492db, 0x022492db, 0x022492db}, + {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266}, + {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015}, + {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000}, + {0x00016444, 0x022492db, 0x022492db, 0x022492db, 0x022492db}, + {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266}, +}; + static const u32 ar9340_1p0_mac_core[][2] = { /* Addr allmodes */ {0x00000008, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index a3710f3bb90c..712f415b8c08 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -260,6 +260,79 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = { {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, }; +static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, + {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, + {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006}, + {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201}, + {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203}, + {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401}, + {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403}, + {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405}, + {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604}, + {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605}, + {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04}, + {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06}, + {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24}, + {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21}, + {0x0000a530, 0x48023ec6, 0x48023ec6, 
0x31000e20, 0x31000e20}, + {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20}, + {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, + {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, + {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, + {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, + {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, + {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, + {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, + {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, + {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb}, + {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a}, + {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a}, + {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a}, + {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, + {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, +}; + static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x000098bc, 0x00000002, 
0x00000002, 0x00000002, 0x00000002}, @@ -450,6 +523,79 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = { #define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1 +static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, + {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, + {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006}, + {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201}, + {0x0000a508, 0x0c002e00, 0x0c002e00, 0x07000203, 0x07000203}, + {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401}, + {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403}, + {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405}, + {0x0000a518, 0x25020ec0, 0x25020ec0, 0x14000406, 0x14000406}, + {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1800040a, 0x1800040a}, + {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000460, 0x1c000460}, + {0x0000a524, 0x35001fc4, 0x35001fc4, 0x22000463, 0x22000463}, + {0x0000a528, 0x3c022f04, 0x3c022f04, 0x26000465, 0x26000465}, + {0x0000a52c, 0x41023e85, 0x41023e85, 0x2e0006e0, 0x2e0006e0}, + {0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0}, + {0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0}, + {0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3}, + {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5}, + {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6}, + {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec}, + {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1}, + {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3}, + {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed}, + {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1}, + {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3}, + {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5}, + {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6}, + {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6}, + {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, + {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, + {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, + {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, + {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, + {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6}, + {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a}, + {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a}, + {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a}, + {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b53c, 0x0000005b, 0x0000005b, 
0x0000005b, 0x0000005b}, + {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, + {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, +}; + static const u32 ar9485_1_1[][2] = { /* Addr allmodes */ {0x0000a580, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h index df97f21c52dc..ccc5b6c99add 100644 --- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h @@ -23,16 +23,16 @@ static const u32 ar955x_1p0_radio_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330}, - {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x06345f2a, 0x06345f2a}, - {0x000160ac, 0xa4647c00, 0xa4647c00, 0xa4646800, 0xa4646800}, - {0x000160b0, 0x01885f52, 0x01885f52, 0x04accf3a, 0x04accf3a}, - {0x00016104, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001}, + {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a}, + {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24647c00, 0x24647c00}, + {0x000160b0, 0x01885f52, 0x01885f52, 0x01885f52, 0x01885f52}, + {0x00016104, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001}, {0x0001610c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000}, {0x00016140, 0x10804008, 0x10804008, 0x10804008, 0x10804008}, - {0x00016504, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001}, + {0x00016504, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001}, {0x0001650c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000}, {0x00016540, 0x10804008, 0x10804008, 0x10804008, 0x10804008}, - {0x00016904, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001}, + {0x00016904, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001}, {0x0001690c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000}, {0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008}, }; @@ -69,15 +69,15 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = { {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0}, {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f}, - {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b}, + {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b}, {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff}, {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018}, {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 
0x02020002}, - {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, + {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e}, {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501}, - {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e}, {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b}, {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, @@ -125,7 +125,7 @@ static const u32 ar955x_1p0_radio_core[][2] = { {0x00016094, 0x00000000}, {0x000160a0, 0x0a108ffe}, {0x000160a4, 0x812fc370}, - {0x000160a8, 0x423c8000}, + {0x000160a8, 0x423c8100}, {0x000160b4, 0x92480080}, {0x000160c0, 0x006db6d0}, {0x000160c4, 0x6db6db60}, @@ -134,7 +134,7 @@ static const u32 ar955x_1p0_radio_core[][2] = { {0x00016100, 0x11999601}, {0x00016108, 0x00080010}, {0x00016144, 0x02084080}, - {0x00016148, 0x000080c0}, + {0x00016148, 0x00008040}, {0x00016280, 0x01800804}, {0x00016284, 0x00038dc5}, {0x00016288, 0x00000000}, @@ -178,7 +178,7 @@ static const u32 ar955x_1p0_radio_core[][2] = { {0x00016500, 0x11999601}, {0x00016508, 0x00080010}, {0x00016544, 0x02084080}, - {0x00016548, 0x000080c0}, + {0x00016548, 0x00008040}, {0x00016780, 0x00000000}, {0x00016784, 0x00000000}, {0x00016788, 0x00400705}, @@ -218,7 +218,7 @@ static const u32 ar955x_1p0_radio_core[][2] = { {0x00016900, 0x11999601}, {0x00016908, 0x00080010}, {0x00016944, 0x02084080}, - {0x00016948, 0x000080c0}, + {0x00016948, 0x00008040}, {0x00016b80, 0x00000000}, {0x00016b84, 0x00000000}, {0x00016b88, 0x00400705}, @@ -245,9 +245,9 @@ static const u32 ar955x_1p0_radio_core[][2] = { static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = { /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */ - {0x0000a2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa}, - {0x0000a2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc}, - {0x0000a2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0}, + {0x0000a2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa}, + {0x0000a2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc}, + {0x0000a2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0}, {0x0000a2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00}, {0x0000a410, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050da, 0x000050da}, {0x0000a500, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000000, 0x00000000}, @@ -256,63 +256,63 @@ static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = { {0x0000a50c, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c000006, 0x0c000006}, {0x0000a510, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x0f00000a, 0x0f00000a}, {0x0000a514, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x1300000c, 0x1300000c}, - {0x0000a518, 0x19004008, 0x19004008, 0x19004008, 0x19004008, 0x18004008, 0x18004008, 0x1700000e, 0x1700000e}, - {0x0000a51c, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1c00400a, 0x1c00400a, 0x1b000064, 0x1b000064}, - {0x0000a520, 0x230020a2, 0x230020a2, 0x210020a2, 0x210020a2, 
0x200020a2, 0x200020a2, 0x1f000242, 0x1f000242}, - {0x0000a524, 0x2500006e, 0x2500006e, 0x2500006e, 0x2500006e, 0x2400006e, 0x2400006e, 0x23000229, 0x23000229}, - {0x0000a528, 0x29022221, 0x29022221, 0x28022221, 0x28022221, 0x27022221, 0x27022221, 0x270002a2, 0x270002a2}, - {0x0000a52c, 0x2d00062a, 0x2d00062a, 0x2c00062a, 0x2c00062a, 0x2a00062a, 0x2a00062a, 0x2c001203, 0x2c001203}, - {0x0000a530, 0x340220a5, 0x340220a5, 0x320220a5, 0x320220a5, 0x2f0220a5, 0x2f0220a5, 0x30001803, 0x30001803}, - {0x0000a534, 0x380022c5, 0x380022c5, 0x350022c5, 0x350022c5, 0x320022c5, 0x320022c5, 0x33000881, 0x33000881}, - {0x0000a538, 0x3b002486, 0x3b002486, 0x39002486, 0x39002486, 0x36002486, 0x36002486, 0x38001809, 0x38001809}, - {0x0000a53c, 0x3f00248a, 0x3f00248a, 0x3d00248a, 0x3d00248a, 0x3a00248a, 0x3a00248a, 0x3a000814, 0x3a000814}, - {0x0000a540, 0x4202242c, 0x4202242c, 0x4102242c, 0x4102242c, 0x3f02242c, 0x3f02242c, 0x3f001a0c, 0x3f001a0c}, - {0x0000a544, 0x490044c6, 0x490044c6, 0x460044c6, 0x460044c6, 0x420044c6, 0x420044c6, 0x43001a0e, 0x43001a0e}, - {0x0000a548, 0x4d024485, 0x4d024485, 0x4a024485, 0x4a024485, 0x46024485, 0x46024485, 0x46001812, 0x46001812}, - {0x0000a54c, 0x51044483, 0x51044483, 0x4e044483, 0x4e044483, 0x4a044483, 0x4a044483, 0x49001884, 0x49001884}, - {0x0000a550, 0x5404a40c, 0x5404a40c, 0x5204a40c, 0x5204a40c, 0x4d04a40c, 0x4d04a40c, 0x4d001e84, 0x4d001e84}, - {0x0000a554, 0x57024632, 0x57024632, 0x55024632, 0x55024632, 0x52024632, 0x52024632, 0x50001e69, 0x50001e69}, - {0x0000a558, 0x5c00a634, 0x5c00a634, 0x5900a634, 0x5900a634, 0x5600a634, 0x5600a634, 0x550006f4, 0x550006f4}, - {0x0000a55c, 0x5f026832, 0x5f026832, 0x5d026832, 0x5d026832, 0x5a026832, 0x5a026832, 0x59000ad3, 0x59000ad3}, - {0x0000a560, 0x6602b012, 0x6602b012, 0x6202b012, 0x6202b012, 0x5d02b012, 0x5d02b012, 0x5e000ad5, 0x5e000ad5}, - {0x0000a564, 0x6e02d0e1, 0x6e02d0e1, 0x6802d0e1, 0x6802d0e1, 0x6002d0e1, 0x6002d0e1, 0x61001ced, 0x61001ced}, - {0x0000a568, 0x7202b4c4, 0x7202b4c4, 0x6c02b4c4, 0x6c02b4c4, 0x6502b4c4, 0x6502b4c4, 0x660018d4, 0x660018d4}, - {0x0000a56c, 0x75007894, 0x75007894, 0x70007894, 0x70007894, 0x6b007894, 0x6b007894, 0x660018d4, 0x660018d4}, - {0x0000a570, 0x7b025c74, 0x7b025c74, 0x75025c74, 0x75025c74, 0x70025c74, 0x70025c74, 0x660018d4, 0x660018d4}, - {0x0000a574, 0x8300bcb5, 0x8300bcb5, 0x7a00bcb5, 0x7a00bcb5, 0x7600bcb5, 0x7600bcb5, 0x660018d4, 0x660018d4}, - {0x0000a578, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4}, - {0x0000a57c, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4}, + {0x0000a518, 0x1700002b, 0x1700002b, 0x1700002b, 0x1700002b, 0x1600002b, 0x1600002b, 0x1700000e, 0x1700000e}, + {0x0000a51c, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1a00002d, 0x1a00002d, 0x1b000064, 0x1b000064}, + {0x0000a520, 0x20000031, 0x20000031, 0x1f000031, 0x1f000031, 0x1e000031, 0x1e000031, 0x1f000242, 0x1f000242}, + {0x0000a524, 0x24000051, 0x24000051, 0x23000051, 0x23000051, 0x23000051, 0x23000051, 0x23000229, 0x23000229}, + {0x0000a528, 0x27000071, 0x27000071, 0x27000071, 0x27000071, 0x26000071, 0x26000071, 0x270002a2, 0x270002a2}, + {0x0000a52c, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2c001203, 0x2c001203}, + {0x0000a530, 0x3000028c, 0x3000028c, 0x2f00028c, 0x2f00028c, 0x2e00028c, 0x2e00028c, 0x30001803, 0x30001803}, + {0x0000a534, 0x34000290, 0x34000290, 0x33000290, 0x33000290, 0x32000290, 0x32000290, 0x33000881, 0x33000881}, + {0x0000a538, 0x37000292, 
0x37000292, 0x36000292, 0x36000292, 0x35000292, 0x35000292, 0x38001809, 0x38001809}, + {0x0000a53c, 0x3b02028d, 0x3b02028d, 0x3a02028d, 0x3a02028d, 0x3902028d, 0x3902028d, 0x3a000814, 0x3a000814}, + {0x0000a540, 0x3f020291, 0x3f020291, 0x3e020291, 0x3e020291, 0x3d020291, 0x3d020291, 0x3f001a0c, 0x3f001a0c}, + {0x0000a544, 0x44020490, 0x44020490, 0x43020490, 0x43020490, 0x42020490, 0x42020490, 0x43001a0e, 0x43001a0e}, + {0x0000a548, 0x48020492, 0x48020492, 0x47020492, 0x47020492, 0x46020492, 0x46020492, 0x46001812, 0x46001812}, + {0x0000a54c, 0x4c020692, 0x4c020692, 0x4b020692, 0x4b020692, 0x4a020692, 0x4a020692, 0x49001884, 0x49001884}, + {0x0000a550, 0x50020892, 0x50020892, 0x4f020892, 0x4f020892, 0x4e020892, 0x4e020892, 0x4d001e84, 0x4d001e84}, + {0x0000a554, 0x53040891, 0x53040891, 0x53040891, 0x53040891, 0x52040891, 0x52040891, 0x50001e69, 0x50001e69}, + {0x0000a558, 0x58040893, 0x58040893, 0x57040893, 0x57040893, 0x56040893, 0x56040893, 0x550006f4, 0x550006f4}, + {0x0000a55c, 0x5c0408b4, 0x5c0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x59000ad3, 0x59000ad3}, + {0x0000a560, 0x610408b6, 0x610408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e000ad5, 0x5e000ad5}, + {0x0000a564, 0x670408f6, 0x670408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x61001ced, 0x61001ced}, + {0x0000a568, 0x6a040cf6, 0x6a040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x660018d4, 0x660018d4}, + {0x0000a56c, 0x6d040d76, 0x6d040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x660018d4, 0x660018d4}, + {0x0000a570, 0x70060db6, 0x70060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x660018d4, 0x660018d4}, + {0x0000a574, 0x730a0df6, 0x730a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x660018d4, 0x660018d4}, + {0x0000a578, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4}, + {0x0000a57c, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03804000, 0x03804000}, - {0x0000a610, 0x04c08c01, 0x04c08c01, 0x04808b01, 0x04808b01, 0x04808a01, 0x04808a01, 0x0300ca02, 0x0300ca02}, - {0x0000a614, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00000e04, 0x00000e04}, - {0x0000a618, 0x04010c01, 0x04010c01, 0x03c10b01, 0x03c10b01, 0x03810a01, 0x03810a01, 0x03014000, 0x03014000}, - {0x0000a61c, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x00000000, 0x00000000}, - {0x0000a620, 0x04010303, 0x04010303, 0x03c10303, 0x03c10303, 0x03810303, 0x03810303, 0x00000000, 0x00000000}, - {0x0000a624, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03014000, 0x03014000}, - {0x0000a628, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x03804c05, 0x03804c05}, - {0x0000a62c, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x0701de06, 0x0701de06}, - {0x0000a630, 0x03418000, 0x03418000, 0x03018000, 0x03018000, 0x02c18000, 0x02c18000, 0x07819c07, 0x07819c07}, - {0x0000a634, 0x03815004, 0x03815004, 0x03414f04, 0x03414f04, 0x03414e04, 0x03414e04, 0x0701dc07, 0x0701dc07}, - 
{0x0000a638, 0x03005302, 0x03005302, 0x02c05202, 0x02c05202, 0x02805202, 0x02805202, 0x0701dc07, 0x0701dc07}, - {0x0000a63c, 0x04c09302, 0x04c09302, 0x04809202, 0x04809202, 0x04809202, 0x04809202, 0x0701dc07, 0x0701dc07}, - {0x0000b2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa}, - {0x0000b2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc}, - {0x0000b2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0}, + {0x0000a60c, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x03804000, 0x03804000}, + {0x0000a610, 0x04008b01, 0x04008b01, 0x04008b01, 0x04008b01, 0x03c08b01, 0x03c08b01, 0x0300ca02, 0x0300ca02}, + {0x0000a614, 0x05811403, 0x05811403, 0x05411303, 0x05411303, 0x05411303, 0x05411303, 0x00000e04, 0x00000e04}, + {0x0000a618, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000}, + {0x0000a61c, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000}, + {0x0000a620, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000}, + {0x0000a624, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000}, + {0x0000a628, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03804c05, 0x03804c05}, + {0x0000a62c, 0x06815604, 0x06815604, 0x06415504, 0x06415504, 0x06015504, 0x06015504, 0x0701de06, 0x0701de06}, + {0x0000a630, 0x07819a05, 0x07819a05, 0x07419905, 0x07419905, 0x07019805, 0x07019805, 0x07819c07, 0x07819c07}, + {0x0000a634, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07}, + {0x0000a638, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07}, + {0x0000a63c, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07}, + {0x0000b2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa}, + {0x0000b2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc}, + {0x0000b2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0}, {0x0000b2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00}, - {0x0000c2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa}, - {0x0000c2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc}, - {0x0000c2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0}, + {0x0000c2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa}, + {0x0000c2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc}, + {0x0000c2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0}, {0x0000c2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00}, {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4}, - {0x00016048, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401}, + {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 
0x66482401, 0x66482401}, {0x00016280, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01808e84, 0x01808e84}, {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4}, - {0x00016448, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401}, + {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401}, {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4}, - {0x00016848, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401}, + {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401}, }; static const u32 ar955x_1p0_mac_core[][2] = { @@ -846,7 +846,7 @@ static const u32 ar955x_1p0_baseband_core[][2] = { {0x0000a44c, 0x00000001}, {0x0000a450, 0x00010000}, {0x0000a458, 0x00000000}, - {0x0000a644, 0x3fad9d74}, + {0x0000a644, 0xbfad9d74}, {0x0000a648, 0x0048060a}, {0x0000a64c, 0x00003c37}, {0x0000a670, 0x03020100}, @@ -1277,7 +1277,7 @@ static const u32 ar955x_1p0_modes_fast_clock[][3] = { {0x0000801c, 0x148ec02b, 0x148ec057}, {0x00008318, 0x000044c0, 0x00008980}, {0x00009e00, 0x0372131c, 0x0372131c}, - {0x0000a230, 0x0000000b, 0x00000016}, + {0x0000a230, 0x0000400b, 0x00004016}, {0x0000a254, 0x00000898, 0x00001130}, }; diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 6e1915aee712..28fd99203f64 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -685,6 +685,82 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = { #define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2 +#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2 + +static const u32 ar9580_1p0_type6_tx_gain_table[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, + {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, + {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, + {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, + {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400}, + {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402}, + {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404}, + {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603}, + {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02}, + {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04}, + {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20}, + {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20}, + {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22}, + {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24}, + {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640}, + {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660}, + {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861}, + 
{0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81}, + {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83}, + {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84}, + {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3}, + {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5}, + {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9}, + {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb}, + {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, + {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, + {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, + {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, + {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000}, + {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501}, + {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501}, + {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03}, + {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04}, + {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04}, + {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, + {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, + {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, + {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, + {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, + {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, + {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, + {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, + {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, + {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, +}; + static const u32 ar9580_1p0_soc_preamble[][2] = { /* Addr allmodes */ {0x000040a4, 0x00a0c1c9}, diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 42794c546a40..a56b2416e2f9 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -109,14 +109,11 @@ struct ath_descdma { void *dd_desc; dma_addr_t dd_desc_paddr; u32 dd_desc_len; - struct ath_buf *dd_bufptr; }; int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, struct list_head 
*head, const char *name, int nbuf, int ndesc, bool is_tx); -void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, - struct list_head *head); /***********/ /* RX / TX */ @@ -319,10 +316,11 @@ struct ath_rx { unsigned int rxfilter; struct list_head rxbuf; struct ath_descdma rxdma; - struct ath_buf *rx_bufptr; struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; struct sk_buff *frag; + + u32 ampdu_ref; }; int ath_startrecv(struct ath_softc *sc); @@ -336,14 +334,12 @@ void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq); void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq); void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq); void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); -bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); -void ath_draintxq(struct ath_softc *sc, - struct ath_txq *txq, bool retry_tx); +bool ath_drain_all_txq(struct ath_softc *sc); +void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq); void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an); void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); int ath_tx_init(struct ath_softc *sc, int nbufs); -void ath_tx_cleanup(struct ath_softc *sc); int ath_txq_update(struct ath_softc *sc, int qnum, struct ath9k_tx_queue_info *q); void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); @@ -393,6 +389,7 @@ struct ath_beacon_config { u16 bmiss_timeout; u8 dtim_count; bool enable_beacon; + bool ibss_creator; }; struct ath_beacon { @@ -672,6 +669,23 @@ struct ath9k_vif_iter_data { int nadhocs; /* number of adhoc vifs */ }; +/* enum spectral_mode: + * + * @SPECTRAL_DISABLED: spectral mode is disabled + * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with + * something else. + * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples + * is performed manually. + * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels + * during a channel scan. + */ +enum spectral_mode { + SPECTRAL_DISABLED = 0, + SPECTRAL_BACKGROUND, + SPECTRAL_MANUAL, + SPECTRAL_CHANSCAN, +}; + struct ath_softc { struct ieee80211_hw *hw; struct device *dev; @@ -740,6 +754,11 @@ struct ath_softc { u8 ant_tx, ant_rx; struct dfs_pattern_detector *dfs_detector; u32 wow_enabled; + /* relay(fs) channel for spectral scan */ + struct rchan *rfs_chan_spec_scan; + enum spectral_mode spectral_mode; + struct ath_spec_scan spec_config; + int scanning; #ifdef CONFIG_PM_SLEEP atomic_t wow_got_bmiss_intr; @@ -748,6 +767,133 @@ struct ath_softc { #endif }; +#define SPECTRAL_SCAN_BITMASK 0x10 +/* Radar info packet format, used for DFS and spectral formats. */ +struct ath_radar_info { + u8 pulse_length_pri; + u8 pulse_length_ext; + u8 pulse_bw_info; +} __packed; + +/* The HT20 spectral data has 4 bytes of additional information at its end. + * + * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]} + * [7:0]: all bins max_magnitude[9:2] + * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]} + * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned) + */ +struct ath_ht20_mag_info { + u8 all_bins[3]; + u8 max_exp; +} __packed; + +#define SPECTRAL_HT20_NUM_BINS 56 + +/* WARNING: don't actually use this struct! MAC may vary the amount of + * data by -1/+2. This struct is for reference only.
+ */ +struct ath_ht20_fft_packet { + u8 data[SPECTRAL_HT20_NUM_BINS]; + struct ath_ht20_mag_info mag_info; + struct ath_radar_info radar_info; +} __packed; + +#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet)) + +/* Dynamic 20/40 mode: + * + * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]} + * [7:0]: lower bins max_magnitude[9:2] + * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]} + * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]} + * [7:0]: upper bins max_magnitude[9:2] + * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]} + * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned) + */ +struct ath_ht20_40_mag_info { + u8 lower_bins[3]; + u8 upper_bins[3]; + u8 max_exp; +} __packed; + +#define SPECTRAL_HT20_40_NUM_BINS 128 + +/* WARNING: don't actually use this struct! MAC may vary the amount of + * data. This struct is for reference only. + */ +struct ath_ht20_40_fft_packet { + u8 data[SPECTRAL_HT20_40_NUM_BINS]; + struct ath_ht20_40_mag_info mag_info; + struct ath_radar_info radar_info; +} __packed; + + +#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet)) + +/* grabs the max magnitude from the all/upper/lower bins */ +static inline u16 spectral_max_magnitude(u8 *bins) +{ + return (bins[0] & 0xc0) >> 6 | + (bins[1] & 0xff) << 2 | + (bins[2] & 0x03) << 10; +} + +/* return the max index from the all/upper/lower bins */ +static inline u8 spectral_max_index(u8 *bins) +{ + s8 m = (bins[2] & 0xfc) >> 2; + + /* TODO: this still doesn't always report the right values ... */ + if (m > 32) + m |= 0xe0; + else + m &= ~0xe0; + + return m + 29; +} + +/* return the bitmap weight from the all/upper/lower bins */ +static inline u8 spectral_bitmap_weight(u8 *bins) +{ + return bins[0] & 0x3f; +} + +/* FFT sample format given to userspace via debugfs. + * + * Please keep the type/length at the front position and change + * other fields after adding another sample type + * + * TODO: this might need rework when switching to nl80211-based + * interface.
+ */ +enum ath_fft_sample_type { + ATH_FFT_SAMPLE_HT20 = 1, +}; + +struct fft_sample_tlv { + u8 type; /* see ath_fft_sample */ + __be16 length; + /* type dependent data follows */ +} __packed; + +struct fft_sample_ht20 { + struct fft_sample_tlv tlv; + + u8 max_exp; + + __be16 freq; + s8 rssi; + s8 noise; + + __be16 max_magnitude; + u8 max_index; + u8 bitmap_weight; + + __be64 tsf; + + u8 data[SPECTRAL_HT20_NUM_BINS]; +} __packed; + void ath9k_tasklet(unsigned long data); int ath_cabq_update(struct ath_softc *); @@ -770,6 +916,10 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); void ath9k_reload_chainmask_settings(struct ath_softc *sc); bool ath9k_uses_beacons(int type); +void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw); +int ath9k_spectral_scan_config(struct ieee80211_hw *hw, + enum spectral_mode spectral_mode); + #ifdef CONFIG_ATH9K_PCI int ath_pci_init(void); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 2ca355e94da6..5f05c26d1ec4 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -199,7 +199,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, if (sc->nvifs > 1) { ath_dbg(common, BEACON, "Flushing previous cabq traffic\n"); - ath_draintxq(sc, cabq, false); + ath_draintxq(sc, cabq); } } @@ -407,12 +407,17 @@ void ath9k_beacon_tasklet(unsigned long data) } } -static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, u32 intval) +/* + * Both nexttbtt and intval have to be in usecs. + */ +static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, + u32 intval, bool reset_tsf) { struct ath_hw *ah = sc->sc_ah; ath9k_hw_disable_interrupts(ah); - ath9k_hw_reset_tsf(ah); + if (reset_tsf) + ath9k_hw_reset_tsf(ah); ath9k_beaconq_config(sc); ath9k_hw_beaconinit(ah, nexttbtt, intval); sc->beacon.bmisscnt = 0; @@ -442,10 +447,12 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc, else ah->imask &= ~ATH9K_INT_SWBA; - ath_dbg(common, BEACON, "AP nexttbtt: %u intval: %u conf_intval: %u\n", + ath_dbg(common, BEACON, + "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n", + (conf->enable_beacon) ? "Enable" : "Disable", nexttbtt, intval, conf->beacon_interval); - ath9k_beacon_init(sc, nexttbtt, intval); + ath9k_beacon_init(sc, nexttbtt, intval, true); } /* @@ -586,17 +593,45 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc, ath9k_reset_beacon_status(sc); intval = TU_TO_USEC(conf->beacon_interval); - nexttbtt = intval; + + if (conf->ibss_creator) { + nexttbtt = intval; + } else { + u32 tbtt, offset, tsftu; + u64 tsf; + + /* + * Pull nexttbtt forward to reflect the current + * sync'd TSF. + */ + tsf = ath9k_hw_gettsf64(ah); + tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE; + offset = tsftu % conf->beacon_interval; + tbtt = tsftu - offset; + if (offset) + tbtt += conf->beacon_interval; + + nexttbtt = TU_TO_USEC(tbtt); + } if (conf->enable_beacon) ah->imask |= ATH9K_INT_SWBA; else ah->imask &= ~ATH9K_INT_SWBA; - ath_dbg(common, BEACON, "IBSS nexttbtt: %u intval: %u conf_intval: %u\n", + ath_dbg(common, BEACON, + "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n", + (conf->enable_beacon) ? "Enable" : "Disable", nexttbtt, intval, conf->beacon_interval); - ath9k_beacon_init(sc, nexttbtt, intval); + ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator); + + /* + * Set the global 'beacon has been configured' flag for the + * joiner case in IBSS mode. 
+ */ + if (!conf->ibss_creator && conf->enable_beacon) + set_bit(SC_OP_BEACONS, &sc->sc_flags); } bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) @@ -639,6 +674,7 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc, cur_conf->dtim_period = bss_conf->dtim_period; cur_conf->listen_interval = 1; cur_conf->dtim_count = 1; + cur_conf->ibss_creator = bss_conf->ibss_creator; cur_conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; @@ -666,34 +702,59 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif, { struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; + unsigned long flags; + bool skip_beacon = false; if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) { ath9k_cache_beacon_config(sc, bss_conf); ath9k_set_beacon(sc); set_bit(SC_OP_BEACONS, &sc->sc_flags); - } else { - /* - * Take care of multiple interfaces when - * enabling/disabling SWBA. - */ - if (changed & BSS_CHANGED_BEACON_ENABLED) { - if (!bss_conf->enable_beacon && - (sc->nbcnvifs <= 1)) { - cur_conf->enable_beacon = false; - } else if (bss_conf->enable_beacon) { - cur_conf->enable_beacon = true; - ath9k_cache_beacon_config(sc, bss_conf); - } + return; + + } + + /* + * Take care of multiple interfaces when + * enabling/disabling SWBA. + */ + if (changed & BSS_CHANGED_BEACON_ENABLED) { + if (!bss_conf->enable_beacon && + (sc->nbcnvifs <= 1)) { + cur_conf->enable_beacon = false; + } else if (bss_conf->enable_beacon) { + cur_conf->enable_beacon = true; + ath9k_cache_beacon_config(sc, bss_conf); } + } - if (cur_conf->beacon_interval) { + /* + * Configure the HW beacon registers only when we have a valid + * beacon interval. + */ + if (cur_conf->beacon_interval) { + /* + * If we are joining an existing IBSS network, start beaconing + * only after a TSF-sync has taken place. Ensure that this + * happens by setting the appropriate flags. + */ + if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator && + bss_conf->enable_beacon) { + spin_lock_irqsave(&sc->sc_pm_lock, flags); + sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; + spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + skip_beacon = true; + } else { ath9k_set_beacon(sc); - - if (cur_conf->enable_beacon) - set_bit(SC_OP_BEACONS, &sc->sc_flags); - else - clear_bit(SC_OP_BEACONS, &sc->sc_flags); } + + /* + * Do not set the SC_OP_BEACONS flag for IBSS joiner mode + * here, it is done in ath9k_beacon_config_adhoc(). 
+ */ + if (cur_conf->enable_beacon && !skip_beacon) + set_bit(SC_OP_BEACONS, &sc->sc_flags); + else + clear_bit(SC_OP_BEACONS, &sc->sc_flags); } } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index e585fc827c50..3714b971d18e 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/export.h> +#include <linux/relay.h> #include <asm/unaligned.h> #include "ath9k.h" @@ -894,6 +895,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, RXS_ERR("RX-Bytes-All", rx_bytes_all); RXS_ERR("RX-Beacons", rx_beacons); RXS_ERR("RX-Frags", rx_frags); + RXS_ERR("RX-Spectral", rx_spectral); if (len > size) len = size; @@ -965,6 +967,290 @@ static const struct file_operations fops_recv = { .llseek = default_llseek, }; +static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char *mode = ""; + unsigned int len; + + switch (sc->spectral_mode) { + case SPECTRAL_DISABLED: + mode = "disable"; + break; + case SPECTRAL_BACKGROUND: + mode = "background"; + break; + case SPECTRAL_CHANSCAN: + mode = "chanscan"; + break; + case SPECTRAL_MANUAL: + mode = "manual"; + break; + } + len = strlen(mode); + return simple_read_from_buffer(user_buf, count, ppos, mode, len); +} + +static ssize_t write_file_spec_scan_ctl(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (strncmp("trigger", buf, 7) == 0) { + ath9k_spectral_scan_trigger(sc->hw); + } else if (strncmp("background", buf, 9) == 0) { + ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); + ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); + } else if (strncmp("chanscan", buf, 8) == 0) { + ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN); + ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n"); + } else if (strncmp("manual", buf, 6) == 0) { + ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL); + ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n"); + } else if (strncmp("disable", buf, 7) == 0) { + ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED); + ath_dbg(common, CONFIG, "spectral scan: disabled\n"); + } else { + return -EINVAL; + } + + return count; +} + +static const struct file_operations fops_spec_scan_ctl = { + .read = read_file_spec_scan_ctl, + .write = write_file_spec_scan_ctl, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_short_repeat(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[32]; + unsigned int len; + + len = sprintf(buf, "%d\n", sc->spec_config.short_repeat); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_short_repeat(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + 
buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 0 || val > 1) + return -EINVAL; + + sc->spec_config.short_repeat = val; + return count; +} + +static const struct file_operations fops_spectral_short_repeat = { + .read = read_file_spectral_short_repeat, + .write = write_file_spectral_short_repeat, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_count(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[32]; + unsigned int len; + + len = sprintf(buf, "%d\n", sc->spec_config.count); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_count(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 0 || val > 255) + return -EINVAL; + + sc->spec_config.count = val; + return count; +} + +static const struct file_operations fops_spectral_count = { + .read = read_file_spectral_count, + .write = write_file_spectral_count, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_period(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[32]; + unsigned int len; + + len = sprintf(buf, "%d\n", sc->spec_config.period); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_period(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 0 || val > 255) + return -EINVAL; + + sc->spec_config.period = val; + return count; +} + +static const struct file_operations fops_spectral_period = { + .read = read_file_spectral_period, + .write = write_file_spectral_period, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_fft_period(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[32]; + unsigned int len; + + len = sprintf(buf, "%d\n", sc->spec_config.fft_period); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_fft_period(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 0 || val > 15) + return -EINVAL; + + sc->spec_config.fft_period = val; + return count; +} + +static const struct file_operations fops_spectral_fft_period = { + .read = read_file_spectral_fft_period, + .write = write_file_spectral_fft_period, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + 
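/*
 * Editor's note -- illustrative sketch only, not part of the patch above.
 * It shows how the packed per-bin maxima described by ath_ht20_mag_info
 * (see the ath9k.h hunk) could be unpacked with the spectral_* helpers
 * added there and used to fill the fft_sample_ht20 record that
 * ath_debug_send_fft_sample() pushes through the relay channel. The
 * function name and the freq/rssi/noise/tsf parameters are assumptions
 * made for this example; they are not introduced by the patch itself.
 */
static inline void example_fill_ht20_sample(struct fft_sample_ht20 *fft,
					    struct ath_ht20_mag_info *mag,
					    const u8 *bins, u16 freq,
					    s8 rssi, s8 noise, u64 tsf)
{
	/* TLV header: length counts everything after the header itself */
	fft->tlv.type = ATH_FFT_SAMPLE_HT20;
	fft->tlv.length = cpu_to_be16(sizeof(*fft) - sizeof(fft->tlv));

	/* decode the 3-byte packed field with the helpers from ath9k.h */
	fft->max_magnitude = cpu_to_be16(spectral_max_magnitude(mag->all_bins));
	fft->max_index = spectral_max_index(mag->all_bins);
	fft->bitmap_weight = spectral_bitmap_weight(mag->all_bins);
	fft->max_exp = mag->max_exp & 0xf;

	fft->freq = cpu_to_be16(freq);
	fft->rssi = rssi;
	fft->noise = noise;
	fft->tsf = cpu_to_be64(tsf);

	/* raw FFT bins follow the metadata */
	memcpy(fft->data, bins, SPECTRAL_HT20_NUM_BINS);

	/* a caller would then hand the record to the relay channel:
	 * ath_debug_send_fft_sample(sc, &fft->tlv);
	 */
}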
+static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + struct dentry *buf_file; + + buf_file = debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); + *is_global = 1; + return buf_file; +} + +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + + return 0; +} + +void ath_debug_send_fft_sample(struct ath_softc *sc, + struct fft_sample_tlv *fft_sample_tlv) +{ + int length; + if (!sc->rfs_chan_spec_scan) + return; + + length = __be16_to_cpu(fft_sample_tlv->length) + + sizeof(*fft_sample_tlv); + relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length); +} + +static struct rchan_callbacks rfs_spec_scan_cb = { + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, +}; + + static ssize_t read_file_regidx(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1779,6 +2065,24 @@ int ath9k_init_debug(struct ath_hw *ah) &fops_base_eeprom); debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_modal_eeprom); + sc->rfs_chan_spec_scan = relay_open("spectral_scan", + sc->debug.debugfs_phy, + 262144, 4, &rfs_spec_scan_cb, + NULL); + debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, + &fops_spec_scan_ctl); + debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, + &fops_spectral_short_repeat); + debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, &fops_spectral_count); + debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, &fops_spectral_period); + debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, + &fops_spectral_fft_period); + #ifdef CONFIG_ATH9K_MAC_DEBUG debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_samps); diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 6df2ab62dcb7..410d6d8f1aa7 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -23,6 +23,7 @@ struct ath_txq; struct ath_buf; +struct fft_sample_tlv; #ifdef CONFIG_ATH9K_DEBUGFS #define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++ @@ -218,6 +219,7 @@ struct ath_tx_stats { * @rx_too_many_frags_err: Frames dropped due to too-many-frags received. * @rx_beacons: No. of beacons received. * @rx_frags: No. of rx-fragements received. + * @rx_spectral: No of spectral packets received. 
*/ struct ath_rx_stats { u32 rx_pkts_all; @@ -236,6 +238,7 @@ struct ath_rx_stats { u32 rx_too_many_frags_err; u32 rx_beacons; u32 rx_frags; + u32 rx_spectral; }; struct ath_stats { @@ -321,6 +324,10 @@ void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); + +void ath_debug_send_fft_sample(struct ath_softc *sc, + struct fft_sample_tlv *fft_sample); + #else #define RX_STAT_INC(c) /* NOP */ diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c index 24877b00cbf4..467b60014b7b 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c @@ -288,11 +288,11 @@ struct dfs_pattern_detector * dfs_pattern_detector_init(enum nl80211_dfs_regions region) { struct dfs_pattern_detector *dpd; + dpd = kmalloc(sizeof(*dpd), GFP_KERNEL); - if (dpd == NULL) { - pr_err("allocation of dfs_pattern_detector failed\n"); + if (dpd == NULL) return NULL; - } + *dpd = default_dpd; INIT_LIST_HEAD(&dpd->channel_detectors); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 05d5ba66cac3..716058b67557 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -280,14 +280,14 @@ err: return ret; } -static int ath9k_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static void ath9k_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath9k_htc_priv *priv = hw->priv; - return ath_reg_notifier_apply(wiphy, request, - ath9k_hw_regulatory(priv->ah)); + ath_reg_notifier_apply(wiphy, request, + ath9k_hw_regulatory(priv->ah)); } static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) @@ -783,7 +783,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) priv->fw_version_major = be16_to_cpu(cmd_rsp.major); priv->fw_version_minor = be16_to_cpu(cmd_rsp.minor); - snprintf(hw->wiphy->fw_version, ETHTOOL_BUSINFO_LEN, "%d.%d", + snprintf(hw->wiphy->fw_version, sizeof(hw->wiphy->fw_version), "%d.%d", priv->fw_version_major, priv->fw_version_minor); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9c07a8fa5134..a8016d70088a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1628,7 +1628,9 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, if (!ret) ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index b6a5a08810b8..3ad1fd05c5e7 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -1196,20 +1196,17 @@ void ath9k_rx_cleanup(struct ath9k_htc_priv *priv) int ath9k_rx_init(struct ath9k_htc_priv *priv) { - struct ath_hw *ah = priv->ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_htc_rxbuf *rxbuf; int i = 0; INIT_LIST_HEAD(&priv->rx.rxbuf); spin_lock_init(&priv->rx.rxbuflock); for (i = 0; i < 
ATH9K_HTC_RXBUF; i++) { - rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL); - if (rxbuf == NULL) { - ath_err(common, "Unable to allocate RX buffers\n"); + struct ath9k_htc_rxbuf *rxbuf = + kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL); + if (rxbuf == NULL) goto err; - } + list_add_tail(&rxbuf->list, &priv->rx.rxbuf); } diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index 0f2b97f6b739..14b701140b49 100644 --- a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -101,22 +101,6 @@ static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah, ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan); } -static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah) -{ - if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks) - return 0; - - return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah); -} - -static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah) -{ - if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks) - return; - - ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah); -} - static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan, u16 modesIndex) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 7cb787065913..2a2ae403e0e5 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -54,11 +54,6 @@ static void ath9k_hw_init_cal_settings(struct ath_hw *ah) ath9k_hw_private_ops(ah)->init_cal_settings(ah); } -static void ath9k_hw_init_mode_regs(struct ath_hw *ah) -{ - ath9k_hw_private_ops(ah)->init_mode_regs(ah); -} - static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -208,7 +203,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan, udelay(hw_delay + BASE_ACTIVATE_DELAY); } -void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, +void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array, int column, unsigned int *writecnt) { int r; @@ -554,28 +549,19 @@ static int ath9k_hw_post_init(struct ath_hw *ah) ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); - ecode = ath9k_hw_rf_alloc_ext_banks(ah); - if (ecode) { - ath_err(ath9k_hw_common(ah), - "Failed allocating banks for external radio\n"); - ath9k_hw_rf_free_ext_banks(ah); - return ecode; - } - - if (ah->config.enable_ani) { - ath9k_hw_ani_setup(ah); + if (ah->config.enable_ani) ath9k_hw_ani_init(ah); - } return 0; } -static void ath9k_hw_attach_ops(struct ath_hw *ah) +static int ath9k_hw_attach_ops(struct ath_hw *ah) { - if (AR_SREV_9300_20_OR_LATER(ah)) - ar9003_hw_attach_ops(ah); - else - ar9002_hw_attach_ops(ah); + if (!AR_SREV_9300_20_OR_LATER(ah)) + return ar9002_hw_attach_ops(ah); + + ar9003_hw_attach_ops(ah); + return 0; } /* Called for all hardware families */ @@ -611,7 +597,9 @@ static int __ath9k_hw_init(struct ath_hw *ah) ath9k_hw_init_defaults(ah); ath9k_hw_init_config(ah); - ath9k_hw_attach_ops(ah); + r = ath9k_hw_attach_ops(ah); + if (r) + return r; if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { ath_err(common, "Couldn't wakeup chip\n"); @@ -675,8 +663,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (!AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function &= ~ATH9K_ANI_MRC_CCK; - ath9k_hw_init_mode_regs(ah); - if (!ah->is_pciexpress) ath9k_hw_disablepcie(ah); @@ -1153,12 +1139,9 @@ void ath9k_hw_deinit(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); if (common->state < 
ATH_HW_INITIALIZED) - goto free_hw; + return; ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); - -free_hw: - ath9k_hw_rf_free_ext_banks(ah); } EXPORT_SYMBOL(ath9k_hw_deinit); @@ -2576,12 +2559,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) rx_chainmask >>= 1; } - if (AR_SREV_9300_20_OR_LATER(ah)) { - ah->enabled_cals |= TX_IQ_CAL; - if (AR_SREV_9485_OR_LATER(ah)) - ah->enabled_cals |= TX_IQ_ON_AGC_CAL; - } - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) pCap->hw_caps |= ATH9K_HW_CAP_MCI; @@ -2590,7 +2567,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->hw_caps |= ATH9K_HW_CAP_RTT; } - if (AR_SREV_9280_20_OR_LATER(ah)) { pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE | ATH9K_HW_WOW_PATTERN_MATCH_EXACT; @@ -3005,13 +2981,8 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, struct ath_gen_timer *timer; timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); - - if (timer == NULL) { - ath_err(ath9k_hw_common(ah), - "Failed to allocate memory for hw timer[%d]\n", - timer_index); + if (timer == NULL) return NULL; - } /* allocate a hardware generic timer slot */ timer_table->timers[timer_index] = timer; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9d26fc56ca56..784e81ccb903 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -397,6 +397,7 @@ enum ath9k_int { #define MAX_RTT_TABLE_ENTRY 6 #define MAX_IQCAL_MEASUREMENT 8 #define MAX_CL_TAB_ENTRY 16 +#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j)) struct ath9k_hw_cal_data { u16 channel; @@ -599,13 +600,10 @@ struct ath_hw_radar_conf { * @init_cal_settings: setup types of calibrations supported * @init_cal: starts actual calibration * - * @init_mode_regs: Initializes mode registers * @init_mode_gain_regs: Initialize TX/RX gain registers * * @rf_set_freq: change frequency * @spur_mitigate_freq: spur mitigation - * @rf_alloc_ext_banks: - * @rf_free_ext_banks: * @set_rf_regs: * @compute_pll_control: compute the PLL control value to use for * AR_RTC_PLL_CONTROL for a given channel @@ -620,7 +618,6 @@ struct ath_hw_private_ops { void (*init_cal_settings)(struct ath_hw *ah); bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan); - void (*init_mode_regs)(struct ath_hw *ah); void (*init_mode_gain_regs)(struct ath_hw *ah); void (*setup_calibration)(struct ath_hw *ah, struct ath9k_cal_list *currCal); @@ -630,8 +627,6 @@ struct ath_hw_private_ops { struct ath9k_channel *chan); void (*spur_mitigate_freq)(struct ath_hw *ah, struct ath9k_channel *chan); - int (*rf_alloc_ext_banks)(struct ath_hw *ah); - void (*rf_free_ext_banks)(struct ath_hw *ah); bool (*set_rf_regs)(struct ath_hw *ah, struct ath9k_channel *chan, u16 modesIndex); @@ -661,6 +656,37 @@ struct ath_hw_private_ops { }; /** + * struct ath_spec_scan - parameters for Atheros spectral scan + * + * @enabled: enable/disable spectral scan + * @short_repeat: controls whether the chip is in spectral scan mode + * for 4 usec (enabled) or 204 usec (disabled) + * @count: number of scan results requested. There are special meanings + * in some chip revisions: + * AR92xx: highest bit set (>=128) for endless mode + * (spectral scan won't stopped until explicitly disabled) + * AR9300 and newer: 0 for endless mode + * @endless: true if endless mode is intended. Otherwise, count value is + * corrected to the next possible value. + * @period: time duration between successive spectral scan entry points + * (period*256*Tclk). 
Tclk = ath_common->clockrate + * @fft_period: PHY passes FFT frames to MAC every (fft_period+1)*4uS + * + * Note: Tclk = 40MHz or 44MHz depending upon operating mode. + * Typically it's 44MHz in 2/5GHz on later chips, but there's + * a "fast clock" check for this in 5GHz. + * + */ +struct ath_spec_scan { + bool enabled; + bool short_repeat; + bool endless; + u8 count; + u8 period; + u8 fft_period; +}; + +/** * struct ath_hw_ops - callbacks used by hardware code and driver code * * This structure contains callbacks designed to to be used internally by @@ -668,6 +694,10 @@ struct ath_hw_private_ops { * * @config_pci_powersave: * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC + * + * @spectral_scan_config: set parameters for spectral scan and enable/disable it + * @spectral_scan_trigger: trigger a spectral scan run + * @spectral_scan_wait: wait for a spectral scan run to finish */ struct ath_hw_ops { void (*config_pci_powersave)(struct ath_hw *ah, @@ -688,6 +718,10 @@ struct ath_hw_ops { void (*antdiv_comb_conf_set)(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf); void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable); + void (*spectral_scan_config)(struct ath_hw *ah, + struct ath_spec_scan *param); + void (*spectral_scan_trigger)(struct ath_hw *ah); + void (*spectral_scan_wait)(struct ath_hw *ah); }; struct ath_nf_limits { @@ -710,6 +744,7 @@ enum ath_cal_list { struct ath_hw { struct ath_ops reg_ops; + struct device *dev; struct ieee80211_hw *hw; struct ath_common common; struct ath9k_hw_version hw_version; @@ -771,7 +806,6 @@ struct ath_hw { struct ath9k_cal_list iq_caldata; struct ath9k_cal_list adcgain_caldata; struct ath9k_cal_list adcdc_caldata; - struct ath9k_cal_list tempCompCalData; struct ath9k_cal_list *cal_list; struct ath9k_cal_list *cal_list_last; struct ath9k_cal_list *cal_list_curr; @@ -830,10 +864,6 @@ struct ath_hw { /* ANI */ u32 proc_phyerr; u32 aniperiod; - int totalSizeDesired[5]; - int coarse_high[5]; - int coarse_low[5]; - int firpwr[5]; enum ath9k_ani_cmd ani_function; u32 ani_skip_count; @@ -979,7 +1009,7 @@ void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan, int hw_delay); bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); -void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, +void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array, int column, unsigned int *writecnt); u32 ath9k_hw_reverse_bits(u32 val, u32 n); u16 ath9k_hw_computetxtime(struct ath_hw *ah, @@ -1069,14 +1099,14 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah); void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); /* Hardware family op attach helpers */ -void ar5008_hw_attach_phy_ops(struct ath_hw *ah); +int ar5008_hw_attach_phy_ops(struct ath_hw *ah); void ar9002_hw_attach_phy_ops(struct ath_hw *ah); void ar9003_hw_attach_phy_ops(struct ath_hw *ah); void ar9002_hw_attach_calib_ops(struct ath_hw *ah); void ar9003_hw_attach_calib_ops(struct ath_hw *ah); -void ar9002_hw_attach_ops(struct ath_hw *ah); +int ar9002_hw_attach_ops(struct ath_hw *ah); void ar9003_hw_attach_ops(struct ath_hw *ah); void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index f69ef5d48c7b..af932c9444de 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -20,6 +20,7 @@ 
#include <linux/slab.h> #include <linux/ath9k_platform.h> #include <linux/module.h> +#include <linux/relay.h> #include "ath9k.h" @@ -302,16 +303,15 @@ static void setup_ht_cap(struct ath_softc *sc, ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; } -static int ath9k_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static void ath9k_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_regulatory *reg = ath9k_hw_regulatory(ah); - int ret; - ret = ath_reg_notifier_apply(wiphy, request, reg); + ath_reg_notifier_apply(wiphy, request, reg); /* Set tx power */ if (ah->curchan) { @@ -321,8 +321,6 @@ static int ath9k_reg_notifier(struct wiphy *wiphy, sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; ath9k_ps_restore(sc); } - - return ret; } /* @@ -337,7 +335,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, struct ath_common *common = ath9k_hw_common(sc->sc_ah); u8 *ds; struct ath_buf *bf; - int i, bsize, error, desc_len; + int i, bsize, desc_len; ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n", name, nbuf, ndesc); @@ -353,8 +351,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, if ((desc_len % 4) != 0) { ath_err(common, "ath_desc not DWORD aligned\n"); BUG_ON((desc_len % 4) != 0); - error = -ENOMEM; - goto fail; + return -ENOMEM; } dd->dd_desc_len = desc_len * nbuf * ndesc; @@ -378,12 +375,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, } /* allocate descriptors */ - dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len, - &dd->dd_desc_paddr, GFP_KERNEL); - if (dd->dd_desc == NULL) { - error = -ENOMEM; - goto fail; - } + dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len, + &dd->dd_desc_paddr, GFP_KERNEL); + if (!dd->dd_desc) + return -ENOMEM; + ds = (u8 *) dd->dd_desc; ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", name, ds, (u32) dd->dd_desc_len, @@ -391,12 +387,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, /* allocate buffers */ bsize = sizeof(struct ath_buf) * nbuf; - bf = kzalloc(bsize, GFP_KERNEL); - if (bf == NULL) { - error = -ENOMEM; - goto fail2; - } - dd->dd_bufptr = bf; + bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL); + if (!bf) + return -ENOMEM; for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) { bf->bf_desc = ds; @@ -422,12 +415,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, list_add_tail(&bf->list, head); } return 0; -fail2: - dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, - dd->dd_desc_paddr); -fail: - memset(dd, 0, sizeof(*dd)); - return error; } static int ath9k_init_queues(struct ath_softc *sc) @@ -457,11 +444,13 @@ static int ath9k_init_channels_rates(struct ath_softc *sc) ATH9K_NUM_CHANNELS); if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) { - channels = kmemdup(ath9k_2ghz_chantable, + channels = devm_kzalloc(sc->dev, sizeof(ath9k_2ghz_chantable), GFP_KERNEL); if (!channels) return -ENOMEM; + memcpy(channels, ath9k_2ghz_chantable, + sizeof(ath9k_2ghz_chantable)); sc->sbands[IEEE80211_BAND_2GHZ].channels = channels; sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; sc->sbands[IEEE80211_BAND_2GHZ].n_channels = @@ -472,14 +461,13 @@ static int ath9k_init_channels_rates(struct ath_softc *sc) } if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) { - channels = kmemdup(ath9k_5ghz_chantable, + channels = 
devm_kzalloc(sc->dev, sizeof(ath9k_5ghz_chantable), GFP_KERNEL); - if (!channels) { - if (sc->sbands[IEEE80211_BAND_2GHZ].channels) - kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels); + if (!channels) return -ENOMEM; - } + memcpy(channels, ath9k_5ghz_chantable, + sizeof(ath9k_5ghz_chantable)); sc->sbands[IEEE80211_BAND_5GHZ].channels = channels; sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; sc->sbands[IEEE80211_BAND_5GHZ].n_channels = @@ -509,6 +497,13 @@ static void ath9k_init_misc(struct ath_softc *sc) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT; + + sc->spec_config.enabled = 0; + sc->spec_config.short_repeat = true; + sc->spec_config.count = 8; + sc->spec_config.endless = false; + sc->spec_config.period = 0xFF; + sc->spec_config.fft_period = 0xF; } static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, @@ -565,10 +560,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, int ret = 0, i; int csz = 0; - ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); + ah = devm_kzalloc(sc->dev, sizeof(struct ath_hw), GFP_KERNEL); if (!ah) return -ENOMEM; + ah->dev = sc->dev; ah->hw = sc->hw; ah->hw_version.devid = devid; ah->reg_ops.read = ath9k_ioread32; @@ -636,7 +632,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (pdata && pdata->eeprom_name) { ret = ath9k_eeprom_request(sc, pdata->eeprom_name); if (ret) - goto err_eeprom; + return ret; } /* Initializes the hardware for all supported chipsets */ @@ -676,10 +672,6 @@ err_queues: ath9k_hw_deinit(ah); err_hw: ath9k_eeprom_release(sc); -err_eeprom: - kfree(ah); - sc->sc_ah = NULL; - return ret; } @@ -844,8 +836,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, /* Bring up device */ error = ath9k_init_softc(devid, sc, bus_ops); - if (error != 0) - goto error_init; + if (error) + return error; ah = sc->sc_ah; common = ath9k_hw_common(ah); @@ -855,19 +847,19 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, error = ath_regd_init(&common->regulatory, sc->hw->wiphy, ath9k_reg_notifier); if (error) - goto error_regd; + goto deinit; reg = &common->regulatory; /* Setup TX DMA */ error = ath_tx_init(sc, ATH_TXBUF); if (error != 0) - goto error_tx; + goto deinit; /* Setup RX DMA */ error = ath_rx_init(sc, ATH_RXBUF); if (error != 0) - goto error_rx; + goto deinit; ath9k_init_txpower_limits(sc); @@ -881,19 +873,19 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, /* Register with mac80211 */ error = ieee80211_register_hw(hw); if (error) - goto error_register; + goto rx_cleanup; error = ath9k_init_debug(ah); if (error) { ath_err(common, "Unable to create debugfs files\n"); - goto error_world; + goto unregister; } /* Handle world regulatory */ if (!ath_is_world_regd(reg)) { error = regulatory_hint(hw->wiphy, reg->alpha2); if (error) - goto error_world; + goto unregister; } ath_init_leds(sc); @@ -901,17 +893,12 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, return 0; -error_world: +unregister: ieee80211_unregister_hw(hw); -error_register: +rx_cleanup: ath_rx_cleanup(sc); -error_rx: - ath_tx_cleanup(sc); -error_tx: - /* Nothing */ -error_regd: +deinit: ath9k_deinit_softc(sc); -error_init: return error; } @@ -923,12 +910,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) { int i = 0; - if (sc->sbands[IEEE80211_BAND_2GHZ].channels) - kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels); - - if (sc->sbands[IEEE80211_BAND_5GHZ].channels) - kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels); - 
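The hunks above and below convert ath9k's descriptor and channel-table allocations to managed (devm_*/dmam_*) variants, which is why the matching kfree()/dma_free_coherent() calls disappear from the error and teardown paths. A minimal sketch of that pattern, with illustrative names only (example_dma/example_setup are not from the patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct example_dma {
	void *desc;		/* CPU address of the descriptor ring */
	dma_addr_t desc_paddr;	/* bus address handed to the hardware */
	void *bufs;		/* driver-private buffer array */
};

static int example_setup(struct device *dev, struct example_dma *dd,
			 size_t desc_len, size_t buf_len)
{
	/* Released automatically when the device is unbound. */
	dd->desc = dmam_alloc_coherent(dev, desc_len, &dd->desc_paddr,
				       GFP_KERNEL);
	if (!dd->desc)
		return -ENOMEM;

	dd->bufs = devm_kzalloc(dev, buf_len, GFP_KERNEL);
	if (!dd->bufs)
		return -ENOMEM;	/* no unwind needed for dd->desc */

	return 0;
}

With both allocations device-managed, a dedicated teardown helper such as ath_descdma_cleanup() becomes unnecessary, which matches its removal later in this patch.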
ath9k_deinit_btcoex(sc); for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) @@ -940,8 +921,11 @@ static void ath9k_deinit_softc(struct ath_softc *sc) sc->dfs_detector->exit(sc->dfs_detector); ath9k_eeprom_release(sc); - kfree(sc->sc_ah); - sc->sc_ah = NULL; + + if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { + relay_close(sc->rfs_chan_spec_scan); + sc->rfs_chan_spec_scan = NULL; + } } void ath9k_deinit_device(struct ath_softc *sc) @@ -957,22 +941,9 @@ void ath9k_deinit_device(struct ath_softc *sc) ieee80211_unregister_hw(hw); ath_rx_cleanup(sc); - ath_tx_cleanup(sc); ath9k_deinit_softc(sc); } -void ath_descdma_cleanup(struct ath_softc *sc, - struct ath_descdma *dd, - struct list_head *head) -{ - dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, - dd->dd_desc_paddr); - - INIT_LIST_HEAD(head); - kfree(dd->dd_bufptr); - memset(dd, 0, sizeof(*dd)); -} - /************************/ /* Module Hooks */ /************************/ diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index b42be910a83d..811007ec07a7 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -605,13 +605,13 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, * reported, then decryption and MIC errors are irrelevant, * the frame is going to be dropped either way */ - if (ads.ds_rxstatus8 & AR_CRCErr) - rs->rs_status |= ATH9K_RXERR_CRC; - else if (ads.ds_rxstatus8 & AR_PHYErr) { + if (ads.ds_rxstatus8 & AR_PHYErr) { rs->rs_status |= ATH9K_RXERR_PHY; phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); rs->rs_phyerr = phyerr; - } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) + } else if (ads.ds_rxstatus8 & AR_CRCErr) + rs->rs_status |= ATH9K_RXERR_CRC; + else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) rs->rs_status |= ATH9K_RXERR_DECRYPT; else if (ads.ds_rxstatus8 & AR_MichaelErr) rs->rs_status |= ATH9K_RXERR_MIC; diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 4a745e68dd94..1ff817061ebc 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -226,7 +226,8 @@ enum ath9k_phyerr { ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35, ATH9K_PHYERR_HT_RATE_ILLEGAL = 36, - ATH9K_PHYERR_MAX = 37, + ATH9K_PHYERR_SPECTRAL = 38, + ATH9K_PHYERR_MAX = 39, }; struct ath_desc { diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index dd91f8fdc01c..6e66f9c6782b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -182,7 +182,7 @@ static void ath_restart_work(struct ath_softc *sc) ath_start_ani(sc); } -static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx) +static bool ath_prepare_reset(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; bool ret = true; @@ -196,10 +196,10 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx) ath9k_debug_samp_bb_mac(sc); ath9k_hw_disable_interrupts(ah); - if (!ath_stoprecv(sc)) + if (!ath_drain_all_txq(sc)) ret = false; - if (!ath_drain_all_txq(sc, retry_tx)) + if (!ath_stoprecv(sc)) ret = false; return ret; @@ -247,8 +247,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) return true; } -static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan, - bool retry_tx) +static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); @@ -271,7 +270,7 @@ static int ath_reset_internal(struct 
ath_softc *sc, struct ath9k_channel *hchan, hchan = ah->curchan; } - if (!ath_prepare_reset(sc, retry_tx)) + if (!ath_prepare_reset(sc)) fastcc = false; ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", @@ -312,7 +311,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, if (test_bit(SC_OP_INVALID, &sc->sc_flags)) return -EIO; - r = ath_reset_internal(sc, hchan, false); + r = ath_reset_internal(sc, hchan); return r; } @@ -321,28 +320,25 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { struct ath_node *an; - u8 density; an = (struct ath_node *)sta->drv_priv; an->sc = sc; an->sta = sta; an->vif = vif; - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { - ath_tx_node_init(sc, an); + ath_tx_node_init(sc, an); + + if (sta->ht_cap.ht_supported) { an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + sta->ht_cap.ampdu_factor); - density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density); - an->mpdudensity = density; + an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density); } } static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) { struct ath_node *an = (struct ath_node *)sta->drv_priv; - - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) - ath_tx_node_cleanup(sc, an); + ath_tx_node_cleanup(sc, an); } void ath9k_tasklet(unsigned long data) @@ -542,23 +538,21 @@ chip_reset: #undef SCHED_INTR } -static int ath_reset(struct ath_softc *sc, bool retry_tx) +static int ath_reset(struct ath_softc *sc) { - int r; + int i, r; ath9k_ps_wakeup(sc); - r = ath_reset_internal(sc, NULL, retry_tx); + r = ath_reset_internal(sc, NULL); - if (retry_tx) { - int i; - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { - if (ATH_TXQ_SETUP(sc, i)) { - spin_lock_bh(&sc->tx.txq[i].axq_lock); - ath_txq_schedule(sc, &sc->tx.txq[i]); - spin_unlock_bh(&sc->tx.txq[i].axq_lock); - } - } + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (!ATH_TXQ_SETUP(sc, i)) + continue; + + spin_lock_bh(&sc->tx.txq[i].axq_lock); + ath_txq_schedule(sc, &sc->tx.txq[i]); + spin_unlock_bh(&sc->tx.txq[i].axq_lock); } ath9k_ps_restore(sc); @@ -579,7 +573,7 @@ void ath_reset_work(struct work_struct *work) { struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work); - ath_reset(sc, true); + ath_reset(sc); } /**********************/ @@ -797,7 +791,7 @@ static void ath9k_stop(struct ieee80211_hw *hw) ath9k_hw_cfg_gpio_input(ah, ah->led_pin); } - ath_prepare_reset(sc, false); + ath_prepare_reset(sc); if (sc->rx.frag) { dev_kfree_skb_any(sc->rx.frag); @@ -1068,6 +1062,75 @@ static void ath9k_disable_ps(struct ath_softc *sc) ath_dbg(common, PS, "PowerSave disabled\n"); } +void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + u32 rxfilter; + + if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { + ath_err(common, "spectrum analyzer not implemented on this hardware\n"); + return; + } + + ath9k_ps_wakeup(sc); + rxfilter = ath9k_hw_getrxfilter(ah); + ath9k_hw_setrxfilter(ah, rxfilter | + ATH9K_RX_FILTER_PHYRADAR | + ATH9K_RX_FILTER_PHYERR); + + /* TODO: usually this should not be neccesary, but for some reason + * (or in some mode?) 
the trigger must be called after the + * configuration, otherwise the register will have its values reset + * (on my ar9220 to value 0x01002310) + */ + ath9k_spectral_scan_config(hw, sc->spectral_mode); + ath9k_hw_ops(ah)->spectral_scan_trigger(ah); + ath9k_ps_restore(sc); +} + +int ath9k_spectral_scan_config(struct ieee80211_hw *hw, + enum spectral_mode spectral_mode) +{ + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { + ath_err(common, "spectrum analyzer not implemented on this hardware\n"); + return -1; + } + + switch (spectral_mode) { + case SPECTRAL_DISABLED: + sc->spec_config.enabled = 0; + break; + case SPECTRAL_BACKGROUND: + /* send endless samples. + * TODO: is this really useful for "background"? + */ + sc->spec_config.endless = 1; + sc->spec_config.enabled = 1; + break; + case SPECTRAL_CHANSCAN: + case SPECTRAL_MANUAL: + sc->spec_config.endless = 0; + sc->spec_config.enabled = 1; + break; + default: + return -1; + } + + ath9k_ps_wakeup(sc); + ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config); + ath9k_ps_restore(sc); + + sc->spectral_mode = spectral_mode; + + return 0; +} + static int ath9k_config(struct ieee80211_hw *hw, u32 changed) { struct ath_softc *sc = hw->priv; @@ -1181,6 +1244,11 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) */ if (old_pos >= 0) ath_update_survey_nf(sc, old_pos); + + /* perform spectral scan if requested. */ + if (sc->scanning && sc->spectral_mode == SPECTRAL_CHANSCAN) + ath9k_spectral_scan_trigger(hw); + } if (changed & IEEE80211_CONF_CHANGE_POWER) { @@ -1603,7 +1671,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); ath9k_ps_restore(sc); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ath9k_ps_wakeup(sc); ath_tx_aggr_stop(sc, sta, tid); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); @@ -1722,11 +1792,11 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) if (drop) { ath9k_ps_wakeup(sc); spin_lock_bh(&sc->sc_pcu_lock); - drain_txq = ath_drain_all_txq(sc, false); + drain_txq = ath_drain_all_txq(sc); spin_unlock_bh(&sc->sc_pcu_lock); if (!drain_txq) - ath_reset(sc, false); + ath_reset(sc); ath9k_ps_restore(sc); ieee80211_wake_queues(hw); @@ -2234,6 +2304,19 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled) } #endif +static void ath9k_sw_scan_start(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + + sc->scanning = 1; +} + +static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + + sc->scanning = 0; +} struct ieee80211_ops ath9k_ops = { .tx = ath9k_tx, @@ -2280,4 +2363,6 @@ struct ieee80211_ops ath9k_ops = { .sta_add_debugfs = ath9k_sta_add_debugfs, .sta_remove_debugfs = ath9k_sta_remove_debugfs, #endif + .sw_scan_start = ath9k_sw_scan_start, + .sw_scan_complete = ath9k_sw_scan_complete, }; diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c index 5c02702f21e7..815bee21c19a 100644 --- a/drivers/net/wireless/ath/ath9k/mci.c +++ b/drivers/net/wireless/ath/ath9k/mci.c @@ -438,7 +438,7 @@ int ath_mci_setup(struct ath_softc *sc) struct ath_mci_buf *buf = &mci->sched_buf; int ret; - buf->bf_addr = dma_alloc_coherent(sc->dev, + buf->bf_addr = dmam_alloc_coherent(sc->dev, ATH_MCI_SCHED_BUF_SIZE + 
ATH_MCI_GPM_BUF_SIZE, &buf->bf_paddr, GFP_KERNEL); @@ -474,13 +474,6 @@ void ath_mci_cleanup(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_hw *ah = sc->sc_ah; - struct ath_mci_coex *mci = &sc->mci_coex; - struct ath_mci_buf *buf = &mci->sched_buf; - - if (buf->bf_addr) - dma_free_coherent(sc->dev, - ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE, - buf->bf_addr, buf->bf_paddr); ar9003_mci_cleanup(ah); diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7ae73fbd9136..0e0d39583837 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -147,7 +147,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = { static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - void __iomem *mem; struct ath_softc *sc; struct ieee80211_hw *hw; u8 csz; @@ -155,19 +154,19 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) int ret = 0; char hw_name[64]; - if (pci_enable_device(pdev)) + if (pcim_enable_device(pdev)) return -EIO; ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("32-bit DMA not available\n"); - goto err_dma; + return ret; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("32-bit DMA consistent DMA enable failed\n"); - goto err_dma; + return ret; } /* @@ -203,25 +202,16 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - ret = pci_request_region(pdev, 0, "ath9k"); + ret = pcim_iomap_regions(pdev, BIT(0), "ath9k"); if (ret) { dev_err(&pdev->dev, "PCI memory region reserve error\n"); - ret = -ENODEV; - goto err_region; - } - - mem = pci_iomap(pdev, 0, 0); - if (!mem) { - pr_err("PCI memory map error\n") ; - ret = -EIO; - goto err_iomap; + return -ENODEV; } hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); if (!hw) { dev_err(&pdev->dev, "No memory for ieee80211_hw\n"); - ret = -ENOMEM; - goto err_alloc_hw; + return -ENOMEM; } SET_IEEE80211_DEV(hw, &pdev->dev); @@ -230,7 +220,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) sc = hw->priv; sc->hw = hw; sc->dev = &pdev->dev; - sc->mem = mem; + sc->mem = pcim_iomap_table(pdev)[0]; /* Will be cleared in ath9k_start() */ set_bit(SC_OP_INVALID, &sc->sc_flags); @@ -251,7 +241,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", - hw_name, (unsigned long)mem, pdev->irq); + hw_name, (unsigned long)sc->mem, pdev->irq); return 0; @@ -259,14 +249,6 @@ err_init: free_irq(sc->irq, sc); err_irq: ieee80211_free_hw(hw); -err_alloc_hw: - pci_iounmap(pdev, mem); -err_iomap: - pci_release_region(pdev, 0); -err_region: - /* Nothing */ -err_dma: - pci_disable_device(pdev); return ret; } @@ -274,17 +256,12 @@ static void ath_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_softc *sc = hw->priv; - void __iomem *mem = sc->mem; if (!is_ath9k_unloaded) sc->sc_ah->ah_flags |= AH_UNPLUGGED; ath9k_deinit_device(sc); free_irq(sc->irq, sc); ieee80211_free_hw(sc->hw); - - pci_iounmap(pdev, mem); - pci_disable_device(pdev); - pci_release_region(pdev, 0); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 714558d1ba78..96ac433ba7f6 100644 --- 
a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1204,7 +1204,7 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta) caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG; else if (sta->ht_cap.mcs.rx_mask[1]) caps |= WLAN_RC_DS_FLAG; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { + if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { caps |= WLAN_RC_40_FLAG; if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) caps |= WLAN_RC_SGI_FLAG; @@ -1452,17 +1452,7 @@ static void ath_rate_free(void *priv) static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) { - struct ath_softc *sc = priv; - struct ath_rate_priv *rate_priv; - - rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp); - if (!rate_priv) { - ath_err(ath9k_hw_common(sc->sc_ah), - "Unable to allocate private rc structure\n"); - return NULL; - } - - return rate_priv; + return kzalloc(sizeof(struct ath_rate_priv), gfp); } static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 90752f246970..ee156e543147 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -15,6 +15,7 @@ */ #include <linux/dma-mapping.h> +#include <linux/relay.h> #include "ath9k.h" #include "ar9003_mac.h" @@ -180,11 +181,6 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc) bf->bf_mpdu = NULL; } } - - INIT_LIST_HEAD(&sc->rx.rxbuf); - - kfree(sc->rx.rx_bufptr); - sc->rx.rx_bufptr = NULL; } static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) @@ -211,12 +207,11 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) ah->caps.rx_hp_qdepth); size = sizeof(struct ath_buf) * nbufs; - bf = kzalloc(size, GFP_KERNEL); + bf = devm_kzalloc(sc->dev, size, GFP_KERNEL); if (!bf) return -ENOMEM; INIT_LIST_HEAD(&sc->rx.rxbuf); - sc->rx.rx_bufptr = bf; for (i = 0; i < nbufs; i++, bf++) { skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); @@ -357,9 +352,6 @@ void ath_rx_cleanup(struct ath_softc *sc) bf->bf_mpdu = NULL; } } - - if (sc->rx.rxdma.dd_desc_len != 0) - ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); } } @@ -541,7 +533,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) if (sc->ps_flags & PS_BEACON_SYNC) { sc->ps_flags &= ~PS_BEACON_SYNC; ath_dbg(common, PS, - "Reconfigure Beacon timers based on timestamp from the AP\n"); + "Reconfigure beacon timers based on synchronized timestamp\n"); ath9k_set_beacon(sc); } @@ -1024,6 +1016,134 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common, rxs->flag &= ~RX_FLAG_DECRYPTED; } +#ifdef CONFIG_ATH9K_DEBUGFS +static s8 fix_rssi_inv_only(u8 rssi_val) +{ + if (rssi_val == 128) + rssi_val = 0; + return (s8) rssi_val; +} +#endif + +/* returns 1 if this was a spectral frame, even if not handled. */ +static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, + struct ath_rx_status *rs, u64 tsf) +{ +#ifdef CONFIG_ATH9K_DEBUGFS + struct ath_hw *ah = sc->sc_ah; + u8 bins[SPECTRAL_HT20_NUM_BINS]; + u8 *vdata = (u8 *)hdr; + struct fft_sample_ht20 fft_sample; + struct ath_radar_info *radar_info; + struct ath_ht20_mag_info *mag_info; + int len = rs->rs_datalen; + int dc_pos; + u16 length, max_magnitude; + + /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer + * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT + * yet, but this is supposed to be possible as well. 
+ */ + if (rs->rs_phyerr != ATH9K_PHYERR_RADAR && + rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT && + rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL) + return 0; + + /* check if spectral scan bit is set. This does not have to be checked + * if received through a SPECTRAL phy error, but shouldn't hurt. + */ + radar_info = ((struct ath_radar_info *)&vdata[len]) - 1; + if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) + return 0; + + /* Variation in the data length is possible and will be fixed later. + * Note that we only support HT20 for now. + * + * TODO: add HT20_40 support as well. + */ + if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) || + (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1)) + return 1; + + fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20; + length = sizeof(fft_sample) - sizeof(fft_sample.tlv); + fft_sample.tlv.length = __cpu_to_be16(length); + + fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq); + fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); + fft_sample.noise = ah->noise; + + switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) { + case 0: + /* length correct, nothing to do. */ + memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS); + break; + case -1: + /* first byte missing, duplicate it. */ + memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1); + bins[0] = vdata[0]; + break; + case 2: + /* MAC added 2 extra bytes at bin 30 and 32, remove them. */ + memcpy(bins, vdata, 30); + bins[30] = vdata[31]; + memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31); + break; + case 1: + /* MAC added 2 extra bytes AND first byte is missing. */ + bins[0] = vdata[0]; + memcpy(&bins[0], vdata, 30); + bins[31] = vdata[31]; + memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32); + break; + default: + return 1; + } + + /* DC value (value in the middle) is the blind spot of the spectral + * sample and invalid, interpolate it. 
+ */ + dc_pos = SPECTRAL_HT20_NUM_BINS / 2; + bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2; + + /* mag data is at the end of the frame, in front of radar_info */ + mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1; + + /* copy raw bins without scaling them */ + memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS); + fft_sample.max_exp = mag_info->max_exp & 0xf; + + max_magnitude = spectral_max_magnitude(mag_info->all_bins); + fft_sample.max_magnitude = __cpu_to_be16(max_magnitude); + fft_sample.max_index = spectral_max_index(mag_info->all_bins); + fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins); + fft_sample.tsf = __cpu_to_be64(tsf); + + ath_debug_send_fft_sample(sc, &fft_sample.tlv); + return 1; +#else + return 0; +#endif +} + +static void ath9k_apply_ampdu_details(struct ath_softc *sc, + struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) +{ + if (rs->rs_isaggr) { + rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; + + rxs->ampdu_reference = sc->rx.ampdu_ref; + + if (!rs->rs_moreaggr) { + rxs->flag |= RX_FLAG_AMPDU_IS_LAST; + sc->rx.ampdu_ref++; + } + + if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE) + rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR; + } +} + int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_buf *bf; @@ -1108,6 +1228,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) rxs->mactime += 0x100000000ULL; + if (rs.rs_status & ATH9K_RXERR_PHY) { + if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) { + RX_STAT_INC(rx_spectral); + goto requeue_drop_frag; + } + } + retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, rxs, &decrypt_error); if (retval) @@ -1223,6 +1350,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3) ath_ant_comb_scan(sc, &rs); + ath9k_apply_ampdu_details(sc, &rs, rxs); + ieee80211_rx(hw, skb); requeue_drop_frag: diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index ad3c82c09177..5929850649f0 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -789,6 +789,7 @@ #define AR_SREV_REVISION_9271_11 1 #define AR_SREV_VERSION_9300 0x1c0 #define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ +#define AR_SREV_REVISION_9300_22 3 #define AR_SREV_VERSION_9330 0x200 #define AR_SREV_REVISION_9330_10 0 #define AR_SREV_REVISION_9330_11 1 @@ -869,6 +870,9 @@ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300)) #define AR_SREV_9300_20_OR_LATER(_ah) \ ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300) +#define AR_SREV_9300_22(_ah) \ + (AR_SREV_9300(ah) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22)) #define AR_SREV_9330(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330)) @@ -884,9 +888,6 @@ #define AR_SREV_9485(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485)) -#define AR_SREV_9485_10(_ah) \ - (AR_SREV_9485(_ah) && \ - ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10)) #define AR_SREV_9485_11(_ah) \ (AR_SREV_9485(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11)) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 90e48a0fafe5..89a64411b82e 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -378,7 +378,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_complete_aggr(struct 
ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf, struct list_head *bf_q, - struct ath_tx_status *ts, int txok, bool retry) + struct ath_tx_status *ts, int txok) { struct ath_node *an = NULL; struct sk_buff *skb; @@ -490,7 +490,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } else if (!isaggr && txok) { /* transmit completion */ acked_cnt++; - } else if ((tid->state & AGGR_CLEANUP) || !retry) { + } else if (tid->state & AGGR_CLEANUP) { /* * cleanup in progress, just fail * the un-acked sub-frames @@ -604,6 +604,37 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR); } +static bool bf_is_ampdu_not_probing(struct ath_buf *bf) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu); + return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); +} + +static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, + struct ath_tx_status *ts, struct ath_buf *bf, + struct list_head *bf_head) +{ + bool txok, flush; + + txok = !(ts->ts_status & ATH9K_TXERR_MASK); + flush = !!(ts->ts_status & ATH9K_TX_FLUSH); + txq->axq_tx_inprogress = false; + + txq->axq_depth--; + if (bf_is_ampdu_not_probing(bf)) + txq->axq_ampdu_depth--; + + if (!bf_isampdu(bf)) { + if (!flush) + ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); + ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); + } else + ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); + + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush) + ath_txq_schedule(sc, txq); +} + static bool ath_lookup_legacy(struct ath_buf *bf) { struct sk_buff *skb; @@ -1202,7 +1233,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, * in HT IBSS when a beacon with HT-info is received after the station * has already been added. 
*/ - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { + if (sta->ht_cap.ht_supported) { an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + sta->ht_cap.ampdu_factor); density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density); @@ -1331,23 +1362,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid /* Queue Management */ /********************/ -static void ath_txq_drain_pending_buffers(struct ath_softc *sc, - struct ath_txq *txq) -{ - struct ath_atx_ac *ac, *ac_tmp; - struct ath_atx_tid *tid, *tid_tmp; - - list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { - list_del(&ac->list); - ac->sched = false; - list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { - list_del(&tid->list); - tid->sched = false; - ath_tid_drain(sc, txq, tid); - } - } -} - struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) { struct ath_hw *ah = sc->sc_ah; @@ -1470,14 +1484,8 @@ int ath_cabq_update(struct ath_softc *sc) return 0; } -static bool bf_is_ampdu_not_probing(struct ath_buf *bf) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu); - return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); -} - static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, - struct list_head *list, bool retry_tx) + struct list_head *list) { struct ath_buf *bf, *lastbf; struct list_head bf_head; @@ -1499,16 +1507,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, lastbf = bf->bf_lastbf; list_cut_position(&bf_head, list, &lastbf->list); - - txq->axq_depth--; - if (bf_is_ampdu_not_probing(bf)) - txq->axq_ampdu_depth--; - - if (bf_isampdu(bf)) - ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, - retry_tx); - else - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); + ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); } } @@ -1518,7 +1517,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, * This assumes output has been stopped and * we do not need to block ath_tx_tasklet. 
*/ -void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) +void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq) { ath_txq_lock(sc, txq); @@ -1526,8 +1525,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) int idx = txq->txq_tailidx; while (!list_empty(&txq->txq_fifo[idx])) { - ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx], - retry_tx); + ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]); INCR(idx, ATH_TXFIFO_DEPTH); } @@ -1536,16 +1534,12 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) txq->axq_link = NULL; txq->axq_tx_inprogress = false; - ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx); - - /* flush any pending frames if aggregation is enabled */ - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx) - ath_txq_drain_pending_buffers(sc, txq); + ath_drain_txq_list(sc, txq, &txq->axq_q); ath_txq_unlock_complete(sc, txq); } -bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) +bool ath_drain_all_txq(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(sc->sc_ah); @@ -1581,7 +1575,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) */ txq = &sc->tx.txq[i]; txq->stopped = false; - ath_draintxq(sc, txq, retry_tx); + ath_draintxq(sc, txq); } return !npend; @@ -1910,8 +1904,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, struct ath_buf *bf; u8 tidno; - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an && - ieee80211_is_data_qos(hdr->frame_control)) { + if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; tid = ATH_AN_2_TID(txctl->an, tidno); @@ -2175,28 +2168,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; } -static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, - struct ath_tx_status *ts, struct ath_buf *bf, - struct list_head *bf_head) -{ - int txok; - - txq->axq_depth--; - txok = !(ts->ts_status & ATH9K_TXERR_MASK); - txq->axq_tx_inprogress = false; - if (bf_is_ampdu_not_probing(bf)) - txq->axq_ampdu_depth--; - - if (!bf_isampdu(bf)) { - ath_tx_rc_status(sc, bf, ts, 1, txok ? 
0 : 1, txok); - ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); - } else - ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); - - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) - ath_txq_schedule(sc, txq); -} - static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hw *ah = sc->sc_ah; @@ -2361,8 +2332,8 @@ static int ath_txstatus_setup(struct ath_softc *sc, int size) u8 txs_len = sc->sc_ah->caps.txs_len; dd->dd_desc_len = size * txs_len; - dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len, - &dd->dd_desc_paddr, GFP_KERNEL); + dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len, + &dd->dd_desc_paddr, GFP_KERNEL); if (!dd->dd_desc) return -ENOMEM; @@ -2382,14 +2353,6 @@ static int ath_tx_edma_init(struct ath_softc *sc) return err; } -static void ath_tx_edma_cleanup(struct ath_softc *sc) -{ - struct ath_descdma *dd = &sc->txsdma; - - dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, - dd->dd_desc_paddr); -} - int ath_tx_init(struct ath_softc *sc, int nbufs) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); @@ -2402,7 +2365,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) if (error != 0) { ath_err(common, "Failed to allocate tx descriptors: %d\n", error); - goto err; + return error; } error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, @@ -2410,36 +2373,17 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) if (error != 0) { ath_err(common, "Failed to allocate beacon descriptors: %d\n", error); - goto err; + return error; } INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) error = ath_tx_edma_init(sc); - if (error) - goto err; - } - -err: - if (error != 0) - ath_tx_cleanup(sc); return error; } -void ath_tx_cleanup(struct ath_softc *sc) -{ - if (sc->beacon.bdma.dd_desc_len != 0) - ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf); - - if (sc->tx.txdma.dd_desc_len != 0) - ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); - - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) - ath_tx_edma_cleanup(sc); -} - void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) { struct ath_atx_tid *tid; diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 2df17f1e49ef..25599741cd8a 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -85,20 +85,14 @@ enum carl9170_device_state { CARL9170_STARTED, }; -#define CARL9170_NUM_TID 16 #define WME_BA_BMP_SIZE 64 #define CARL9170_TX_USER_RATE_TRIES 3 -#define WME_AC_BE 2 -#define WME_AC_BK 3 -#define WME_AC_VI 1 -#define WME_AC_VO 0 - #define TID_TO_WME_AC(_tid) \ - ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ - (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ - (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ - WME_AC_VO) + ((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? 
IEEE80211_AC_VI : \ + IEEE80211_AC_VO) #define SEQ_DIFF(_start, _seq) \ (((_start) - (_seq)) & 0x0fff) @@ -290,6 +284,7 @@ struct ar9170 { unsigned int rx_size; unsigned int tx_seq_table; bool ba_filter; + bool disable_offload_fw; } fw; /* interface configuration combinations */ @@ -493,8 +488,8 @@ struct carl9170_sta_info { bool sleeping; atomic_t pending_frames; unsigned int ampdu_max_len; - struct carl9170_sta_tid __rcu *agg[CARL9170_NUM_TID]; - struct carl9170_ba_stats stats[CARL9170_NUM_TID]; + struct carl9170_sta_tid __rcu *agg[IEEE80211_NUM_TIDS]; + struct carl9170_ba_stats stats[IEEE80211_NUM_TIDS]; }; struct carl9170_tx_info { diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index 63fd9af3fd39..47d5c2e910ad 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -215,6 +215,24 @@ static int carl9170_fw_tx_sequence(struct ar9170 *ar) return 0; } +static void carl9170_fw_set_if_combinations(struct ar9170 *ar, + u16 if_comb_types) +{ + if (ar->fw.vif_num < 2) + return; + + ar->if_comb_limits[0].max = ar->fw.vif_num; + ar->if_comb_limits[0].types = if_comb_types; + + ar->if_combs[0].num_different_channels = 1; + ar->if_combs[0].max_interfaces = ar->fw.vif_num; + ar->if_combs[0].limits = ar->if_comb_limits; + ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits); + + ar->hw->wiphy->iface_combinations = ar->if_combs; + ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs); +} + static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) { const struct carl9170fw_otus_desc *otus_desc; @@ -264,7 +282,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) if (!SUPP(CARL9170FW_COMMAND_CAM)) { dev_info(&ar->udev->dev, "crypto offloading is disabled " "by firmware.\n"); - ar->disable_offload = true; + ar->fw.disable_offload_fw = true; } if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM)) @@ -345,20 +363,15 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) } } - ar->if_comb_limits[0].max = ar->fw.vif_num; - ar->if_comb_limits[0].types = if_comb_types; - - ar->if_combs[0].num_different_channels = 1; - ar->if_combs[0].max_interfaces = ar->fw.vif_num; - ar->if_combs[0].limits = ar->if_comb_limits; - ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits); - - ar->hw->wiphy->iface_combinations = ar->if_combs; - ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs); + carl9170_fw_set_if_combinations(ar, if_comb_types); ar->hw->wiphy->interface_modes |= if_comb_types; - ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + ar->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + /* As IBSS Encryption is software-based, IBSS RSN is supported. */ + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_SUPPORTS_TDLS; #undef SUPPORTED return carl9170_fw_tx_sequence(ar); diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h index 9443c802b25b..9111d4ffc1b3 100644 --- a/drivers/net/wireless/ath/carl9170/fwcmd.h +++ b/drivers/net/wireless/ath/carl9170/fwcmd.h @@ -156,6 +156,14 @@ struct carl9170_psm { } __packed; #define CARL9170_PSM_SIZE 4 +/* + * Note: If a bit in rx_filter is set, then it + * means that the particular frames which matches + * the condition are FILTERED/REMOVED/DISCARDED! + * (This is can be a bit confusing, especially + * because someone people think it's the exact + * opposite way, so watch out!) 
+ */ struct carl9170_rx_filter_cmd { __le32 rx_filter; } __packed; diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h index fa834c1460f0..0db874abde50 100644 --- a/drivers/net/wireless/ath/carl9170/hw.h +++ b/drivers/net/wireless/ath/carl9170/hw.h @@ -384,7 +384,7 @@ #define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xd84) #define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xd88) -#define AR9170_MAC_BCN_LENGTH_MAX 256 +#define AR9170_MAC_BCN_LENGTH_MAX (512 - 32) #define AR9170_MAC_REG_BCN_STATUS (AR9170_MAC_REG_BASE + 0xd8c) diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 25a1e2f4f738..f293b3ff4756 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -358,8 +358,13 @@ static int carl9170_op_start(struct ieee80211_hw *hw) ar->ps.last_action = jiffies; ar->ps.last_slept = jiffies; ar->erp_mode = CARL9170_ERP_AUTO; - ar->rx_software_decryption = false; - ar->disable_offload = false; + + /* Set "disable hw crypto offload" whenever the module parameter + * nohwcrypt is true or if the firmware does not support it. + */ + ar->disable_offload = modparam_nohwcrypt | + ar->fw.disable_offload_fw; + ar->rx_software_decryption = ar->disable_offload; for (i = 0; i < ar->hw->queues; i++) { ar->queue_stop_timeout[i] = jiffies; @@ -565,12 +570,28 @@ static int carl9170_init_interface(struct ar9170 *ar, memcpy(common->macaddr, vif->addr, ETH_ALEN); - if (modparam_nohwcrypt || - ((vif->type != NL80211_IFTYPE_STATION) && - (vif->type != NL80211_IFTYPE_AP))) { - ar->rx_software_decryption = true; - ar->disable_offload = true; - } + /* We have to fall back to software crypto, whenever + * the user choose to participates in an IBSS. HW + * offload for IBSS RSN is not supported by this driver. + * + * NOTE: If the previous main interface has already + * disabled hw crypto offload, we have to keep this + * previous disable_offload setting as it was. + * Altough ideally, we should notify mac80211 and tell + * it to forget about any HW crypto offload for now. + */ + ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && + (vif->type != NL80211_IFTYPE_AP)); + + /* While the driver supports HW offload in a single + * P2P client configuration, it doesn't support HW + * offload in the favourit, concurrent P2P GO+CLIENT + * configuration. Hence, HW offload will always be + * disabled for P2P. + */ + ar->disable_offload |= vif->p2p; + + ar->rx_software_decryption = ar->disable_offload; err = carl9170_set_operating_mode(ar); return err; @@ -580,7 +601,7 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv; - struct ieee80211_vif *main_vif; + struct ieee80211_vif *main_vif, *old_main = NULL; struct ar9170 *ar = hw->priv; int vif_id = -1, err = 0; @@ -602,6 +623,15 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, goto init; } + /* Because the AR9170 HW's MAC doesn't provide full support for + * multiple, independent interfaces [of different operation modes]. + * We have to select ONE main interface [main mode of HW], but we + * can have multiple slaves [AKA: entry in the ACK-table]. + * + * The first (from HEAD/TOP) interface in the ar->vif_list is + * always the main intf. All following intfs in this list + * are considered to be slave intfs. 
+ */ main_vif = carl9170_get_main_vif(ar); if (main_vif) { @@ -610,6 +640,18 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) break; + /* P2P GO [master] use-case + * Because the P2P GO station is selected dynamically + * by all participating peers of a WIFI Direct network, + * the driver has be able to change the main interface + * operating mode on the fly. + */ + if (main_vif->p2p && vif->p2p && + vif->type == NL80211_IFTYPE_AP) { + old_main = main_vif; + break; + } + err = -EBUSY; rcu_read_unlock(); @@ -648,14 +690,41 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, vif_priv->id = vif_id; vif_priv->enable_beacon = false; ar->vifs++; - list_add_tail_rcu(&vif_priv->list, &ar->vif_list); + if (old_main) { + /* We end up in here, if the main interface is being replaced. + * Put the new main interface at the HEAD of the list and the + * previous inteface will automatically become second in line. + */ + list_add_rcu(&vif_priv->list, &ar->vif_list); + } else { + /* Add new inteface. If the list is empty, it will become the + * main inteface, otherwise it will be slave. + */ + list_add_tail_rcu(&vif_priv->list, &ar->vif_list); + } rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif); init: - if (carl9170_get_main_vif(ar) == vif) { + main_vif = carl9170_get_main_vif(ar); + + if (main_vif == vif) { rcu_assign_pointer(ar->beacon_iter, vif_priv); rcu_read_unlock(); + if (old_main) { + struct carl9170_vif_info *old_main_priv = + (void *) old_main->drv_priv; + /* downgrade old main intf to slave intf. + * NOTE: We are no longer under rcu_read_lock. + * But we are still holding ar->mutex, so the + * vif data [id, addr] is safe. + */ + err = carl9170_mod_virtual_mac(ar, old_main_priv->id, + old_main->addr); + if (err) + goto unlock; + } + err = carl9170_init_interface(ar, vif); if (err) goto unlock; @@ -1112,9 +1181,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (ar->disable_offload || !vif) return -EOPNOTSUPP; - /* - * We have to fall back to software encryption, whenever - * the user choose to participates in an IBSS or is connected + /* Fall back to software encryption whenever the driver is connected * to more than one network. 
* * This is very unfortunate, because some machines cannot handle @@ -1263,7 +1330,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw, return 0; } - for (i = 0; i < CARL9170_NUM_TID; i++) + for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) RCU_INIT_POINTER(sta_info->agg[i], NULL); sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor); @@ -1287,7 +1354,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw, sta_info->ht_sta = false; rcu_read_lock(); - for (i = 0; i < CARL9170_NUM_TID; i++) { + for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) { struct carl9170_sta_tid *tid_info; tid_info = rcu_dereference(sta_info->agg[i]); @@ -1394,7 +1461,9 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: rcu_read_lock(); tid_info = rcu_dereference(sta_info->agg[tid]); if (tid_info) { @@ -1784,7 +1853,7 @@ void *carl9170_alloc(size_t priv_size) IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | - IEEE80211_HW_NEED_DTIM_PERIOD | + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SIGNAL_DBM; if (!modparam_noht) { @@ -1805,10 +1874,6 @@ void *carl9170_alloc(size_t priv_size) for (i = 0; i < ARRAY_SIZE(ar->noise); i++) ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */ - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - - /* As IBSS Encryption is software-based, IBSS RSN is supported. */ - hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; return ar; err_nomem: @@ -1916,13 +1981,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar) return 0; } -static int carl9170_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static void carl9170_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ar9170 *ar = hw->priv; - return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory); + ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory); } int carl9170_register(struct ar9170 *ar) diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index ef4ec0da6e49..9c0b150d5b8e 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -1520,35 +1520,92 @@ void carl9170_tx_scheduler(struct ar9170 *ar) carl9170_tx(ar); } -int carl9170_update_beacon(struct ar9170 *ar, const bool submit) +/* caller has to take rcu_read_lock */ +static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar) { - struct sk_buff *skb = NULL; struct carl9170_vif_info *cvif; + int i = 1; + + /* The AR9170 hardware has no fancy beacon queue or some + * other scheduling mechanism. So, the driver has to make + * due by setting the two beacon timers (pretbtt and tbtt) + * once and then swapping the beacon address in the HW's + * register file each time the pretbtt fires. 
+ */ + + cvif = rcu_dereference(ar->beacon_iter); + if (ar->vifs > 0 && cvif) { + do { + list_for_each_entry_continue_rcu(cvif, &ar->vif_list, + list) { + if (cvif->active && cvif->enable_beacon) + goto out; + } + } while (ar->beacon_enabled && i--); + } + +out: + rcu_assign_pointer(ar->beacon_iter, cvif); + return cvif; +} + +static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb, + u32 *ht1, u32 *plcp) +{ struct ieee80211_tx_info *txinfo; struct ieee80211_tx_rate *rate; - __le32 *data, *old = NULL; - unsigned int plcp, power, chains; - u32 word, ht1, off, addr, len; - int i = 0, err = 0; + unsigned int power, chains; + bool ht_rate; - rcu_read_lock(); - cvif = rcu_dereference(ar->beacon_iter); -retry: - if (ar->vifs == 0 || !cvif) - goto out_unlock; + txinfo = IEEE80211_SKB_CB(skb); + rate = &txinfo->control.rates[0]; + ht_rate = !!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS); + carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains); - list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) { - if (cvif->active && cvif->enable_beacon) - goto found; + *ht1 = AR9170_MAC_BCN_HT1_TX_ANT0; + if (chains == AR9170_TX_PHY_TXCHAIN_2) + *ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1; + SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, *ht1, 7); + SET_VAL(AR9170_MAC_BCN_HT1_TPC, *ht1, power); + SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, *ht1, chains); + + if (ht_rate) { + *ht1 |= AR9170_MAC_BCN_HT1_HT_EN; + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + *plcp |= AR9170_MAC_BCN_HT2_SGI; + + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) { + *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED; + *plcp |= AR9170_MAC_BCN_HT2_BW40; + } else if (rate->flags & IEEE80211_TX_RC_DUP_DATA) { + *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP; + *plcp |= AR9170_MAC_BCN_HT2_BW40; + } + + SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN); + } else { + if (*plcp <= AR9170_TX_PHY_RATE_CCK_11M) + *plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400; + else + *plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010; } - if (!ar->beacon_enabled || i++) - goto out_unlock; + return ht_rate; +} - goto retry; +int carl9170_update_beacon(struct ar9170 *ar, const bool submit) +{ + struct sk_buff *skb = NULL; + struct carl9170_vif_info *cvif; + __le32 *data, *old = NULL; + u32 word, ht1, plcp, off, addr, len; + int i = 0, err = 0; + bool ht_rate; -found: - rcu_assign_pointer(ar->beacon_iter, cvif); + rcu_read_lock(); + cvif = carl9170_pick_beaconing_vif(ar); + if (!cvif) + goto out_unlock; skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif), NULL, NULL); @@ -1558,7 +1615,6 @@ found: goto err_free; } - txinfo = IEEE80211_SKB_CB(skb); spin_lock_bh(&ar->beacon_lock); data = (__le32 *)skb->data; if (cvif->beacon) @@ -1588,43 +1644,14 @@ found: goto err_unlock; } - ht1 = AR9170_MAC_BCN_HT1_TX_ANT0; - rate = &txinfo->control.rates[0]; - carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains); - if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) { - if (plcp <= AR9170_TX_PHY_RATE_CCK_11M) - plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400; - else - plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010; - } else { - ht1 |= AR9170_MAC_BCN_HT1_HT_EN; - if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - plcp |= AR9170_MAC_BCN_HT2_SGI; - - if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) { - ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED; - plcp |= AR9170_MAC_BCN_HT2_BW40; - } - if (rate->flags & IEEE80211_TX_RC_DUP_DATA) { - ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP; - plcp |= AR9170_MAC_BCN_HT2_BW40; - } - - 
SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN); - } - - SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7); - SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power); - SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains); - if (chains == AR9170_TX_PHY_TXCHAIN_2) - ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1; + ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp); carl9170_async_regwrite_begin(ar); carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1); - if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) - carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp); - else + if (ht_rate) carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp); + else + carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp); for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) { /* diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h index 2ec3e9191e4d..2282847d4bb8 100644 --- a/drivers/net/wireless/ath/carl9170/version.h +++ b/drivers/net/wireless/ath/carl9170/version.h @@ -1,7 +1,7 @@ #ifndef __CARL9170_SHARED_VERSION_H #define __CARL9170_SHARED_VERSION_H #define CARL9170FW_VERSION_YEAR 12 -#define CARL9170FW_VERSION_MONTH 7 -#define CARL9170FW_VERSION_DAY 7 -#define CARL9170FW_VERSION_GIT "1.9.6" +#define CARL9170FW_VERSION_MONTH 12 +#define CARL9170FW_VERSION_DAY 15 +#define CARL9170FW_VERSION_GIT "1.9.7" #endif /* __CARL9170_SHARED_VERSION_H */ diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index d81698015bf7..ccc4c718f124 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -195,8 +195,6 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy, const struct ieee80211_reg_rule *reg_rule; struct ieee80211_channel *ch; unsigned int i; - u32 bandwidth = 0; - int r; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { @@ -214,11 +212,8 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy, continue; if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { - r = freq_reg_info(wiphy, - ch->center_freq, - bandwidth, - ®_rule); - if (r) + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (IS_ERR(reg_rule)) continue; /* * If 11d had a rule for this channel ensure @@ -254,8 +249,6 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy, struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; const struct ieee80211_reg_rule *reg_rule; - u32 bandwidth = 0; - int r; sband = wiphy->bands[IEEE80211_BAND_2GHZ]; if (!sband) @@ -283,16 +276,16 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy, */ ch = &sband->channels[11]; /* CH 12 */ - r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule); - if (!r) { + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (!IS_ERR(reg_rule)) { if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } ch = &sband->channels[12]; /* CH 13 */ - r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule); - if (!r) { + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (!IS_ERR(reg_rule)) { if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; @@ -363,9 +356,9 @@ static u16 ath_regd_find_country_by_name(char *alpha2) return -1; } -int ath_reg_notifier_apply(struct wiphy *wiphy, - struct regulatory_request *request, - struct ath_regulatory *reg) +void ath_reg_notifier_apply(struct wiphy *wiphy, + struct regulatory_request *request, + struct ath_regulatory *reg) { struct ath_common *common = 
container_of(reg, struct ath_common, regulatory); @@ -380,7 +373,7 @@ int ath_reg_notifier_apply(struct wiphy *wiphy, * any pending requests in the queue. */ if (!request) - return 0; + return; switch (request->initiator) { case NL80211_REGDOM_SET_BY_CORE: @@ -416,8 +409,6 @@ int ath_reg_notifier_apply(struct wiphy *wiphy, break; } - - return 0; } EXPORT_SYMBOL(ath_reg_notifier_apply); @@ -507,8 +498,8 @@ ath_get_regpair(int regdmn) static int ath_regd_init_wiphy(struct ath_regulatory *reg, struct wiphy *wiphy, - int (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)) + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)) { const struct ieee80211_regdomain *regd; @@ -628,8 +619,8 @@ static int __ath_regd_init(struct ath_regulatory *reg) int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, - int (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)) + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)) { struct ath_common *common = container_of(reg, struct ath_common, regulatory); diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 03a8268ccf21..37f53bd8fcb1 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -252,12 +252,12 @@ enum CountryCode { bool ath_is_world_regd(struct ath_regulatory *reg); bool ath_is_49ghz_allowed(u16 redomain); int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, - int (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)); + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)); u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, enum ieee80211_band band); -int ath_reg_notifier_apply(struct wiphy *wiphy, - struct regulatory_request *request, - struct ath_regulatory *reg); +void ath_reg_notifier_apply(struct wiphy *wiphy, + struct regulatory_request *request, + struct ath_regulatory *reg); #endif diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 116f4e807ae1..9ecc1968262c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -204,7 +204,6 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, break; default: return -EOPNOTSUPP; - } /* FW don't support scan after connection attempt */ @@ -228,8 +227,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, } /* 0-based channel indexes */ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1; - wil_dbg(wil, "Scan for ch %d : %d MHz\n", ch, - request->channels[i]->center_freq); + wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch, + request->channels[i]->center_freq); } return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + @@ -342,7 +341,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, } out: - cfg80211_put_bss(bss); + cfg80211_put_bss(wiphy, bss); return rc; } @@ -425,8 +424,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, return -EINVAL; } - wil_dbg(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value, - channel->center_freq, info->privacy ? "secure" : "open"); + wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value, + channel->center_freq, info->privacy ? 
"secure" : "open"); print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, info->ssid, info->ssid_len); diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 38049da71049..dc97e7b2609c 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -38,7 +38,9 @@ #define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE #define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \ BIT_DMA_EP_TX_ICR_TX_DONE_N(0)) -#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | ISR_MISC_MBOX_EVT) +#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \ + ISR_MISC_MBOX_EVT | \ + ISR_MISC_FW_ERROR) #define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \ BIT_DMA_PSEUDO_CAUSE_TX | \ @@ -50,7 +52,6 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr) { - } #else /* defined(CONFIG_WIL6210_ISR_COR) */ /* configure to Write-1-to-Clear mode */ @@ -94,7 +95,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil) static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_IRQ(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "%s()\n", __func__); iowrite32(WIL6210_IRQ_DISABLE, wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); @@ -125,7 +126,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil) static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_IRQ(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "%s()\n", __func__); set_bit(wil_status_irqen, &wil->status); @@ -135,7 +136,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) void wil6210_disable_irq(struct wil6210_priv *wil) { - wil_dbg_IRQ(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "%s()\n", __func__); wil6210_mask_irq_tx(wil); wil6210_mask_irq_rx(wil); @@ -145,7 +146,7 @@ void wil6210_disable_irq(struct wil6210_priv *wil) void wil6210_enable_irq(struct wil6210_priv *wil) { - wil_dbg_IRQ(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "%s()\n", __func__); iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + offsetof(struct RGF_ICR, ICC)); @@ -167,7 +168,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_RX_ICR) + offsetof(struct RGF_ICR, ICR)); - wil_dbg_IRQ(wil, "ISR RX 0x%08x\n", isr); + wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (!isr) { wil_err(wil, "spurious IRQ: RX\n"); @@ -177,7 +178,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) wil6210_mask_irq_rx(wil); if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) { - wil_dbg_IRQ(wil, "RX done\n"); + wil_dbg_irq(wil, "RX done\n"); isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE; wil_rx_handle(wil); } @@ -197,7 +198,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); - wil_dbg_IRQ(wil, "ISR TX 0x%08x\n", isr); + wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (!isr) { wil_err(wil, "spurious IRQ: TX\n"); @@ -208,13 +209,13 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { uint i; - wil_dbg_IRQ(wil, "TX done\n"); + wil_dbg_irq(wil, "TX done\n"); isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; for (i = 0; i < 24; i++) { u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i); if (isr & mask) { isr &= ~mask; - wil_dbg_IRQ(wil, "TX done(%i)\n", i); + wil_dbg_irq(wil, "TX done(%i)\n", i); wil_tx_complete(wil, i); } } @@ -228,6 +229,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) return IRQ_HANDLED; } +static void wil_notify_fw_error(struct wil6210_priv *wil) +{ + struct device *dev = &wil_to_ndev(wil)->dev; + char *envp[3] = { 
+ [0] = "SOURCE=wil6210", + [1] = "EVENT=FW_ERROR", + [2] = NULL, + }; + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); +} + static irqreturn_t wil6210_irq_misc(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -235,7 +247,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICR)); - wil_dbg_IRQ(wil, "ISR MISC 0x%08x\n", isr); + wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr); if (!isr) { wil_err(wil, "spurious IRQ: MISC\n"); @@ -244,8 +256,15 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) wil6210_mask_irq_misc(wil); + if (isr & ISR_MISC_FW_ERROR) { + wil_dbg_irq(wil, "IRQ: Firmware error\n"); + clear_bit(wil_status_fwready, &wil->status); + wil_notify_fw_error(wil); + isr &= ~ISR_MISC_FW_ERROR; + } + if (isr & ISR_MISC_FW_READY) { - wil_dbg_IRQ(wil, "IRQ: FW ready\n"); + wil_dbg_irq(wil, "IRQ: FW ready\n"); /** * Actual FW ready indicated by the * WMI_FW_READY_EVENTID @@ -268,10 +287,10 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) struct wil6210_priv *wil = cookie; u32 isr = wil->isr_misc; - wil_dbg_IRQ(wil, "Thread ISR MISC 0x%08x\n", isr); + wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); if (isr & ISR_MISC_MBOX_EVT) { - wil_dbg_IRQ(wil, "MBOX event\n"); + wil_dbg_irq(wil, "MBOX event\n"); wmi_recv_cmd(wil); isr &= ~ISR_MISC_MBOX_EVT; } @@ -293,7 +312,7 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie) { struct wil6210_priv *wil = cookie; - wil_dbg_IRQ(wil, "Thread IRQ\n"); + wil_dbg_irq(wil, "Thread IRQ\n"); /* Discover real IRQ cause */ if (wil->isr_misc) wil6210_irq_misc_thread(irq, cookie); @@ -370,6 +389,8 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) if (wil6210_debug_irq_mask(wil, pseudo_cause)) return IRQ_NONE; + wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause); + wil6210_mask_irq_pseudo(wil); /* Discover real IRQ cause @@ -401,8 +422,6 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) if (rc != IRQ_WAKE_THREAD) wil6210_unmask_irq_pseudo(wil); - wil_dbg_IRQ(wil, "Hard IRQ 0x%08x\n", pseudo_cause); - return rc; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 95fcd361322b..761c389586d4 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -64,7 +64,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid) struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; - wil_dbg(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "%s()\n", __func__); wil_link_off(wil); clear_bit(wil_status_fwconnected, &wil->status); @@ -80,11 +80,13 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid) GFP_KERNEL); break; default: - ; + break; } for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) wil_vring_fini_tx(wil, i); + + clear_bit(wil_status_dontscan, &wil->status); } static void wil_disconnect_worker(struct work_struct *work) @@ -99,7 +101,7 @@ static void wil_connect_timer_fn(ulong x) { struct wil6210_priv *wil = (void *)x; - wil_dbg(wil, "Connect timeout\n"); + wil_dbg_misc(wil, "Connect timeout\n"); /* reschedule to thread context - disconnect won't * run from atomic context @@ -107,9 +109,18 @@ static void wil_connect_timer_fn(ulong x) schedule_work(&wil->disconnect_worker); } +static void wil_cache_mbox_regs(struct wil6210_priv *wil) +{ + /* make shadow copy of registers that should not change on run time */ + wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX, + 
sizeof(struct wil6210_mbox_ctl)); + wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx); + wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); +} + int wil_priv_init(struct wil6210_priv *wil) { - wil_dbg(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "%s()\n", __func__); mutex_init(&wil->mutex); mutex_init(&wil->wmi_mutex); @@ -136,11 +147,7 @@ int wil_priv_init(struct wil6210_priv *wil) return -EAGAIN; } - /* make shadow copy of registers that should not change on run time */ - wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX, - sizeof(struct wil6210_mbox_ctl)); - wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx); - wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); + wil_cache_mbox_regs(wil); return 0; } @@ -162,7 +169,7 @@ void wil_priv_deinit(struct wil6210_priv *wil) static void wil_target_reset(struct wil6210_priv *wil) { - wil_dbg(wil, "Resetting...\n"); + wil_dbg_misc(wil, "Resetting...\n"); /* register write */ #define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a)) @@ -202,7 +209,7 @@ static void wil_target_reset(struct wil6210_priv *wil) msleep(2000); - wil_dbg(wil, "Reset completed\n"); + wil_dbg_misc(wil, "Reset completed\n"); #undef W #undef S @@ -225,8 +232,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil) wil_err(wil, "Firmware not ready\n"); return -ETIME; } else { - wil_dbg(wil, "FW ready after %d ms\n", - jiffies_to_msecs(to-left)); + wil_dbg_misc(wil, "FW ready after %d ms\n", + jiffies_to_msecs(to-left)); } return 0; } @@ -243,13 +250,13 @@ int wil_reset(struct wil6210_priv *wil) cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL); + wil6210_disable_irq(wil); + wil->status = 0; + wmi_event_flush(wil); - flush_workqueue(wil->wmi_wq); flush_workqueue(wil->wmi_wq_conn); - - wil6210_disable_irq(wil); - wil->status = 0; + flush_workqueue(wil->wmi_wq); /* TODO: put MAC in reset */ wil_target_reset(wil); @@ -258,11 +265,7 @@ int wil_reset(struct wil6210_priv *wil) wil->pending_connect_cid = -1; INIT_COMPLETION(wil->wmi_ready); - /* make shadow copy of registers that should not change on run time */ - wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX, - sizeof(struct wil6210_mbox_ctl)); - wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx); - wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); + wil_cache_mbox_regs(wil); /* TODO: release MAC reset */ wil6210_enable_irq(wil); @@ -278,7 +281,7 @@ void wil_link_on(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); - wil_dbg(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "%s()\n", __func__); netif_carrier_on(ndev); netif_tx_wake_all_queues(ndev); @@ -288,7 +291,7 @@ void wil_link_off(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); - wil_dbg(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "%s()\n", __func__); netif_tx_stop_all_queues(ndev); netif_carrier_off(ndev); @@ -311,27 +314,27 @@ static int __wil_up(struct wil6210_priv *wil) wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: - wil_dbg(wil, "type: STATION\n"); + wil_dbg_misc(wil, "type: STATION\n"); bi = 0; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_AP: - wil_dbg(wil, "type: AP\n"); + wil_dbg_misc(wil, "type: AP\n"); bi = 100; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_P2P_CLIENT: - wil_dbg(wil, "type: P2P_CLIENT\n"); + wil_dbg_misc(wil, "type: P2P_CLIENT\n"); bi = 0; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_P2P_GO: - wil_dbg(wil, "type: P2P_GO\n"); + wil_dbg_misc(wil, "type: P2P_GO\n"); bi = 100; ndev->type = ARPHRD_ETHER; break; case 
NL80211_IFTYPE_MONITOR: - wil_dbg(wil, "type: Monitor\n"); + wil_dbg_misc(wil, "type: Monitor\n"); bi = 0; ndev->type = ARPHRD_IEEE80211_RADIOTAP; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */ @@ -354,7 +357,7 @@ static int __wil_up(struct wil6210_priv *wil) wmi_set_channel(wil, channel->hw_value); break; default: - ; + break; } /* MAC address - pre-requisite for other commands */ diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 3068b5cb53a7..8ce2e33dce20 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -35,37 +35,12 @@ static int wil_stop(struct net_device *ndev) return wil_down(wil); } -/* - * AC to queue mapping - * - * AC_VO -> queue 3 - * AC_VI -> queue 2 - * AC_BE -> queue 1 - * AC_BK -> queue 0 - */ -static u16 wil_select_queue(struct net_device *ndev, struct sk_buff *skb) -{ - static const u16 wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; - struct wil6210_priv *wil = ndev_to_wil(ndev); - u16 rc; - - skb->priority = cfg80211_classify8021d(skb); - - rc = wil_1d_to_queue[skb->priority]; - - wil_dbg_TXRX(wil, "%s() %d -> %d\n", __func__, (int)skb->priority, - (int)rc); - - return rc; -} - static const struct net_device_ops wil_netdev_ops = { .ndo_open = wil_open, .ndo_stop = wil_stop, .ndo_start_xmit = wil_start_xmit, - .ndo_select_queue = wil_select_queue, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, }; void *wil_if_alloc(struct device *dev, void __iomem *csr) @@ -97,7 +72,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels; cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT); - ndev = alloc_netdev_mqs(0, "wlan%d", ether_setup, WIL6210_TX_QUEUES, 1); + ndev = alloc_netdev(0, "wlan%d", ether_setup); if (!ndev) { dev_err(dev, "alloc_netdev_mqs failed\n"); rc = -ENOMEM; diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 0fc83edd6bad..81c35c6e3832 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -53,7 +53,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) } wil->n_msi = use_msi; if (wil->n_msi) { - wil_dbg(wil, "Setup %d MSI interrupts\n", use_msi); + wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi); rc = pci_enable_msi_block(pdev, wil->n_msi); if (rc && (wil->n_msi == 3)) { wil_err(wil, "3 MSI mode failed, try 1 MSI\n"); @@ -65,7 +65,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) wil->n_msi = 0; } } else { - wil_dbg(wil, "MSI interrupts disabled, use INTx\n"); + wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n"); } rc = wil6210_init_irq(wil, pdev->irq); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index f29c294413cf..d1315b442375 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -74,8 +74,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) vring->swtail = 0; vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL); if (!vring->ctx) { - wil_err(wil, "vring_alloc [%d] failed to alloc ctx mem\n", - vring->size); vring->va = NULL; return -ENOMEM; } @@ -100,8 +98,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) d->dma.status = TX_DMA_STATUS_DU; } - 
wil_dbg(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size, - vring->va, (unsigned long long)vring->pa, vring->ctx); + wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size, + vring->va, (unsigned long long)vring->pa, vring->ctx); return 0; } @@ -353,8 +351,8 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) wil_rx_add_radiotap_header(wil, skb, d); - wil_dbg_TXRX(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length); - wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_NONE, 32, 4, + wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length); + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); wil_vring_advance_head(vring, 1); @@ -369,7 +367,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, */ ftype = wil_rxdesc_ftype(d) << 2; if (ftype != IEEE80211_FTYPE_DATA) { - wil_dbg_TXRX(wil, "Non-data frame ftype 0x%08x\n", ftype); + wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); /* TODO: process it */ kfree_skb(skb); return NULL; @@ -430,6 +428,8 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) int rc; unsigned int len = skb->len; + skb_orphan(skb); + if (in_interrupt()) rc = netif_rx(skb); else @@ -459,13 +459,11 @@ void wil_rx_handle(struct wil6210_priv *wil) wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); return; } - wil_dbg_TXRX(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "%s()\n", __func__); while (NULL != (skb = wil_vring_reap_rx(wil, v))) { - wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_OFFSET, 16, 1, + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb_headlen(skb), false); - skb_orphan(skb); - if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { skb->dev = ndev; skb_reset_mac_header(skb); @@ -484,53 +482,18 @@ void wil_rx_handle(struct wil6210_priv *wil) int wil_rx_init(struct wil6210_priv *wil) { - struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil->wdev; struct vring *vring = &wil->vring_rx; int rc; - struct wmi_cfg_rx_chain_cmd cmd = { - .action = WMI_RX_CHAIN_ADD, - .rx_sw_ring = { - .max_mpdu_size = cpu_to_le16(RX_BUF_LEN), - }, - .mid = 0, /* TODO - what is it? */ - .decap_trans_type = WMI_DECAP_TYPE_802_3, - }; - struct { - struct wil6210_mbox_hdr_wmi wmi; - struct wmi_cfg_rx_chain_done_event evt; - } __packed evt; vring->size = WIL6210_RX_RING_SIZE; rc = wil_vring_alloc(wil, vring); if (rc) return rc; - cmd.rx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); - cmd.rx_sw_ring.ring_size = cpu_to_le16(vring->size); - if (wdev->iftype == NL80211_IFTYPE_MONITOR) { - struct ieee80211_channel *ch = wdev->preset_chandef.chan; - - cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON); - if (ch) - cmd.sniffer_cfg.channel = ch->hw_value - 1; - cmd.sniffer_cfg.phy_info_mode = - cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); - cmd.sniffer_cfg.phy_support = - cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) - ? 
WMI_SNIFFER_CP : WMI_SNIFFER_DP); - } - /* typical time for secure PCP is 840ms */ - rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), - WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); + rc = wmi_rx_chain_add(wil, vring); if (rc) goto err_free; - vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); - - wil_dbg(wil, "Rx init: status %d tail 0x%08x\n", - le32_to_cpu(evt.evt.status), vring->hwtail); - rc = wil_rx_refill(wil, vring->size); if (rc) goto err_free; @@ -546,25 +509,8 @@ void wil_rx_fini(struct wil6210_priv *wil) { struct vring *vring = &wil->vring_rx; - if (vring->va) { - int rc; - struct wmi_cfg_rx_chain_cmd cmd = { - .action = cpu_to_le32(WMI_RX_CHAIN_DEL), - .rx_sw_ring = { - .max_mpdu_size = cpu_to_le16(RX_BUF_LEN), - }, - }; - struct { - struct wil6210_mbox_hdr_wmi wmi; - struct wmi_cfg_rx_chain_done_event cfg; - } __packed wmi_rx_cfg_reply; - - rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), - WMI_CFG_RX_CHAIN_DONE_EVENTID, - &wmi_rx_cfg_reply, sizeof(wmi_rx_cfg_reply), - 100); + if (vring->va) wil_vring_free(wil, vring, 0); - } } int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, @@ -617,6 +563,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) { wil_err(wil, "Tx config failed, status 0x%02x\n", reply.cmd.status); + rc = -EINVAL; goto out_free; } vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); @@ -689,7 +636,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, uint i = swhead; dma_addr_t pa; - wil_dbg_TXRX(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "%s()\n", __func__); if (avail < vring->size/8) netif_tx_stop_all_queues(wil_to_ndev(wil)); @@ -706,9 +653,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - wil_dbg_TXRX(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb), + wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb), skb->data, (unsigned long long)pa); - wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_OFFSET, 16, 1, + wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb_headlen(skb), false); if (unlikely(dma_mapping_error(dev, pa))) @@ -737,12 +684,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS); - wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_NONE, 32, 4, + wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); /* advance swhead */ wil_vring_advance_head(vring, nr_frags + 1); - wil_dbg_TXRX(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); + wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); /* hold reference to skb * to prevent skb release before accounting @@ -775,7 +722,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct vring *vring; int rc; - wil_dbg_TXRX(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "%s()\n", __func__); if (!test_bit(wil_status_fwready, &wil->status)) { wil_err(wil, "FW not ready\n"); goto drop; @@ -802,15 +749,13 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) } switch (rc) { case 0: - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; + /* statistics will be updated on the tx_complete */ dev_kfree_skb_any(skb); return NETDEV_TX_OK; case -ENOMEM: return 
NETDEV_TX_BUSY; default: - ; /* goto drop; */ - break; + break; /* goto drop; */ } drop: netif_tx_stop_all_queues(ndev); @@ -827,6 +772,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) */ void wil_tx_complete(struct wil6210_priv *wil, int ringid) { + struct net_device *ndev = wil_to_ndev(wil); struct device *dev = wil_to_dev(wil); struct vring *vring = &wil->vring_tx[ringid]; @@ -835,7 +781,7 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid) return; } - wil_dbg_TXRX(wil, "%s(%d)\n", __func__, ringid); + wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); while (!wil_vring_is_empty(vring)) { volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx; @@ -844,16 +790,23 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid) if (!(d->dma.status & TX_DMA_STATUS_DU)) break; - wil_dbg_TXRX(wil, + wil_dbg_txrx(wil, "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n", vring->swtail, d->dma.length, d->dma.status, d->dma.error); - wil_hex_dump_TXRX("TxC ", DUMP_PREFIX_NONE, 32, 4, + wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); skb = vring->ctx[vring->swtail]; if (skb) { + if (d->dma.error == 0) { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + } else { + ndev->stats.tx_errors++; + } + dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE); dev_kfree_skb_any(skb); vring->ctx[vring->swtail] = NULL; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9bcfffa4006c..aea961ff8f08 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -36,8 +36,6 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_MEM_SIZE (2*1024*1024UL) -#define WIL6210_TX_QUEUES (4) - #define WIL6210_RX_RING_SIZE (128) #define WIL6210_TX_RING_SIZE (128) #define WIL6210_MAX_TX_RINGS (24) @@ -101,8 +99,7 @@ struct RGF_ICR { #define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0) #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1) - #define BIT_DMA_EP_MISC_ICR_FW_INT0 BIT(28) - #define BIT_DMA_EP_MISC_ICR_FW_INT1 BIT(29) + #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */ /* Interrupt moderation control */ #define RGF_DMA_ITR_CNT_TRSH (0x881c5c) @@ -121,8 +118,9 @@ struct RGF_ICR { #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2 /* ISR register bits */ -#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT0 -#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT1 +#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT(0) +#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1) +#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3) /* Hardware definitions end */ @@ -272,17 +270,18 @@ struct wil6210_priv { #define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg) #define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg) -#define wil_dbg_IRQ(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg) -#define wil_dbg_TXRX(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg) -#define wil_dbg_WMI(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg) +#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg) +#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg) +#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg) +#define wil_dbg_misc(wil, fmt, arg...) 
wil_dbg(wil, "DBG[MISC]" fmt, ##arg) -#define wil_hex_dump_TXRX(prefix_str, prefix_type, rowsize, \ +#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\ prefix_type, rowsize, \ groupsize, buf, len, ascii) -#define wil_hex_dump_WMI(prefix_str, prefix_type, rowsize, \ +#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\ prefix_type, rowsize, \ @@ -328,6 +327,7 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, const void *mac_addr, int key_len, const void *key); int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); +int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); int wil6210_init_irq(struct wil6210_priv *wil, int irq); void wil6210_fini_irq(struct wil6210_priv *wil, int irq); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 12915f6e7617..0bb3b76b4b58 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -18,8 +18,10 @@ #include <linux/io.h> #include <linux/list.h> #include <linux/etherdevice.h> +#include <linux/if_arp.h> #include "wil6210.h" +#include "txrx.h" #include "wmi.h" /** @@ -186,7 +188,6 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) wil_err(wil, "WMI size too large: %d bytes, max is %d\n", (int)(sizeof(cmd) + len), r->entry_size); return -ERANGE; - } might_sleep(); @@ -213,7 +214,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) } /* next head */ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size); - wil_dbg_WMI(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); + wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); /* wait till FW finish with previous command */ for (retry = 5; retry > 0; retry--) { r->tail = ioread32(wil->csr + HOST_MBOX + @@ -234,10 +235,10 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) } cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq); /* set command */ - wil_dbg_WMI(wil, "WMI command 0x%04x [%d]\n", cmdid, len); - wil_hex_dump_WMI("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd, + wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len); + wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd, sizeof(cmd), true); - wil_hex_dump_WMI("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf, + wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true); wil_memcpy_toio_32(dst, &cmd, sizeof(cmd)); wil_memcpy_toio_32(dst + sizeof(cmd), buf, len); @@ -273,7 +274,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) struct wmi_ready_event *evt = d; u32 ver = le32_to_cpu(evt->sw_version); - wil_dbg_WMI(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac); + wil_dbg_wmi(wil, "FW ver. 
%d; MAC %pM\n", ver, evt->mac); if (!is_valid_ether_addr(ndev->dev_addr)) { memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); @@ -286,7 +287,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, int len) { - wil_dbg_WMI(wil, "WMI: FW ready\n"); + wil_dbg_wmi(wil, "WMI: FW ready\n"); set_bit(wil_status_fwready, &wil->status); /* reuse wmi_ready for the firmware ready indication */ @@ -309,11 +310,11 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) u32 d_len = le32_to_cpu(data->info.len); u16 d_status = le16_to_cpu(data->info.status); - wil_dbg_WMI(wil, "MGMT: channel %d MCS %d SNR %d\n", + wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", data->info.channel, data->info.mcs, data->info.snr); - wil_dbg_WMI(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len, + wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len, le16_to_cpu(data->info.stype)); - wil_dbg_WMI(wil, "qid %d mid %d cid %d\n", + wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", data->info.qid, data->info.mid, data->info.cid); if (!channel) { @@ -329,15 +330,15 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable; size_t ie_len = d_len - offsetof(struct ieee80211_mgmt, u.beacon.variable); - wil_dbg_WMI(wil, "Capability info : 0x%04x\n", cap); + wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap); bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid, tsf, cap, bi, ie_buf, ie_len, signal, GFP_KERNEL); if (bss) { - wil_dbg_WMI(wil, "Added BSS %pM\n", + wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid); - cfg80211_put_bss(bss); + cfg80211_put_bss(wiphy, bss); } else { wil_err(wil, "cfg80211_inform_bss() failed\n"); } @@ -351,7 +352,7 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, struct wmi_scan_complete_event *data = d; bool aborted = (data->status != 0); - wil_dbg_WMI(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); + wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); cfg80211_scan_done(wil->scan_request, aborted); wil->scan_request = NULL; } else { @@ -386,9 +387,9 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) return; } ch = evt->channel + 1; - wil_dbg_WMI(wil, "Connect %pM channel [%d] cid %d\n", + wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", evt->bssid, ch, evt->cid); - wil_hex_dump_WMI("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, + wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, evt->assoc_info, len - sizeof(*evt), true); /* figure out IE's */ @@ -450,14 +451,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, { struct wmi_disconnect_event *evt = d; - wil_dbg_WMI(wil, "Disconnect %pM reason %d proto %d wmi\n", + wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n", evt->bssid, evt->protocol_reason_status, evt->disconnect_reason); wil->sinfo_gen++; wil6210_disconnect(wil, evt->bssid); - clear_bit(wil_status_dontscan, &wil->status); } static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len) @@ -476,7 +476,7 @@ static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len) wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector); wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector); wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector); - wil_dbg_WMI(wil, "Link status, MCS %d TSF 0x%016llx\n" + wil_dbg_wmi(wil, "Link status, 
MCS %d TSF 0x%016llx\n" "BF status 0x%08x SNR 0x%08x\n" "Tx Tpt %d goodput %d Rx goodput %d\n" "Sectors(rx:tx) my %d:%d peer %d:%d\n", @@ -501,7 +501,7 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, struct sk_buff *skb; struct ethhdr *eth; - wil_dbg_WMI(wil, "EAPOL len %d from %pM\n", eapol_len, + wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len, evt->src_mac); if (eapol_len > 196) { /* TODO: revisit size limit */ @@ -587,11 +587,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil) evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event, event.wmi) + len, 4), GFP_KERNEL); - if (!evt) { - wil_err(wil, "kmalloc for WMI event (%d) failed\n", - len); + if (!evt) return; - } + evt->event.hdr = hdr; cmd = (void *)&evt->event.wmi; wil_memcpy_fromio_32(cmd, src, len); @@ -599,15 +597,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil) iowrite32(0, wil->csr + HOSTADDR(r->tail) + offsetof(struct wil6210_mbox_ring_desc, sync)); /* indicate */ - wil_dbg_WMI(wil, "Mbox evt %04x %04x %04x %02x\n", + wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n", le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type), hdr.flags); if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - wil_dbg_WMI(wil, "WMI event 0x%04x\n", + wil_dbg_wmi(wil, "WMI event 0x%04x\n", evt->event.wmi.id); } - wil_hex_dump_WMI("evt ", DUMP_PREFIX_OFFSET, 16, 1, + wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, &evt->event.hdr, sizeof(hdr) + len, true); /* advance tail */ @@ -623,7 +621,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) { int q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); - wil_dbg_WMI(wil, "queue_work -> %d\n", q); + wil_dbg_wmi(wil, "queue_work -> %d\n", q); } } } @@ -650,7 +648,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, cmdid, reply_id, to_msec); rc = -ETIME; } else { - wil_dbg_WMI(wil, + wil_dbg_wmi(wil, "wmi_call(0x%04x->0x%04x) completed in %d msec\n", cmdid, reply_id, to_msec - jiffies_to_msecs(remain)); @@ -680,7 +678,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) memcpy(cmd.mac, addr, ETH_ALEN); - wil_dbg_WMI(wil, "Set MAC %pM\n", addr); + wil_dbg_wmi(wil, "Set MAC %pM\n", addr); return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); } @@ -778,7 +776,7 @@ int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb) skb_set_mac_header(skb, 0); eth = eth_hdr(skb); - wil_dbg_WMI(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest); + wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest); for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0) goto found_dest; @@ -838,10 +836,8 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); - if (!cmd) { - wil_err(wil, "kmalloc(%d) failed\n", len); + if (!cmd) return -ENOMEM; - } cmd->mgmt_frm_type = type; /* BUG: FW API define ieLen as u8. 
Will fix FW */ @@ -853,11 +849,60 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) return rc; } +int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) +{ + struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); + struct wmi_cfg_rx_chain_cmd cmd = { + .action = WMI_RX_CHAIN_ADD, + .rx_sw_ring = { + .max_mpdu_size = cpu_to_le16(RX_BUF_LEN), + .ring_mem_base = cpu_to_le64(vring->pa), + .ring_size = cpu_to_le16(vring->size), + }, + .mid = 0, /* TODO - what is it? */ + .decap_trans_type = WMI_DECAP_TYPE_802_3, + }; + struct { + struct wil6210_mbox_hdr_wmi wmi; + struct wmi_cfg_rx_chain_done_event evt; + } __packed evt; + int rc; + + if (wdev->iftype == NL80211_IFTYPE_MONITOR) { + struct ieee80211_channel *ch = wdev->preset_chandef.chan; + + cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON); + if (ch) + cmd.sniffer_cfg.channel = ch->hw_value - 1; + cmd.sniffer_cfg.phy_info_mode = + cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); + cmd.sniffer_cfg.phy_support = + cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) + ? WMI_SNIFFER_CP : WMI_SNIFFER_DP); + } + /* typical time for secure PCP is 840ms */ + rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), + WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); + if (rc) + return rc; + + vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); + + wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", + le32_to_cpu(evt.evt.status), vring->hwtail); + + if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) + rc = -EINVAL; + + return rc; +} + void wmi_event_flush(struct wil6210_priv *wil) { struct pending_wmi_event *evt, *t; - wil_dbg_WMI(wil, "%s()\n", __func__); + wil_dbg_wmi(wil, "%s()\n", __func__); list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { list_del(&evt->list); @@ -899,7 +944,7 @@ static void wmi_event_handle(struct wil6210_priv *wil, wmi_evt_call_handler(wil, id, evt_data, len - sizeof(*wmi)); } - wil_dbg_WMI(wil, "Complete WMI 0x%04x\n", id); + wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); complete(&wil->wmi_ready); return; } @@ -964,7 +1009,7 @@ void wmi_connect_worker(struct work_struct *work) return; } - wil_dbg_WMI(wil, "Configure for connection CID %d\n", + wil_dbg_wmi(wil, "Configure for connection CID %d\n", wil->pending_connect_cid); rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c index ded03d226a71..b42930f457c2 100644 --- a/drivers/net/wireless/atmel_cs.c +++ b/drivers/net/wireless/atmel_cs.c @@ -79,10 +79,9 @@ static int atmel_probe(struct pcmcia_device *p_dev) /* Allocate space for private device-specific data */ local = kzalloc(sizeof(local_info_t), GFP_KERNEL); - if (!local) { - printk(KERN_ERR "atmel_cs: no memory for new device\n"); + if (!local) return -ENOMEM; - } + p_dev->priv = local; return atmel_config(p_dev); diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index 315b96ed1d90..9fdd1983079c 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -169,7 +169,7 @@ struct b43_dmadesc_generic { /* DMA engine tuning knobs */ #define B43_TXRING_SLOTS 256 -#define B43_RXRING_SLOTS 64 +#define B43_RXRING_SLOTS 256 #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN) #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN) diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c 
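
wmi_rx_chain_add() above collects the WMI_CFG_RX_CHAIN reply into an on-stack __packed struct that holds the mailbox header immediately followed by the event body, then maps a non-success status to -EINVAL. A self-contained sketch of that packed header-plus-event layout (type names and field widths are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	/* Illustrative only: a packed "header + event" reply buffer in the
	 * style the wmi_call() reply is read, with no padding between parts.
	 */
	struct demo_mbox_hdr {
		uint16_t id;
		uint16_t len;
	} __attribute__((packed));

	struct demo_rx_chain_done {
		uint32_t status;
		uint32_t rx_ring_tail_ptr;
	} __attribute__((packed));

	struct demo_reply {
		struct demo_mbox_hdr wmi;
		struct demo_rx_chain_done evt;
	} __attribute__((packed));

	int main(void)
	{
		struct demo_reply reply = {
			.wmi = { .id = 0x0820, .len = sizeof(struct demo_rx_chain_done) },
			.evt = { .status = 0, .rx_ring_tail_ptr = 0x1000 },
		};

		/* packed layout: event body starts right after the 4-byte header */
		printf("evt offset = %zu, total = %zu\n",
		       offsetof(struct demo_reply, evt), sizeof(reply));

		if (reply.evt.status != 0)	/* like the WMI_CFG_RX_CHAIN_SUCCESS check */
			return 1;

		printf("tail = 0x%08x\n", (unsigned int)reply.evt.rx_ring_tail_ptr);
		return 0;
	}
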
index 97d4e27bf36f..aaca60c6f575 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -3226,8 +3226,6 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( { struct nphy_gain_ctl_workaround_entry *e; u8 phy_idx; - u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso : - dev->dev->bus_sprom->fem.ghz2.tr_iso; if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11) return &nphy_gain_ctl_wa_phy6_radio11_ghz2; @@ -3249,6 +3247,10 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( !b43_channel_type_is_40mhz(dev->phy.channel_type)) e->cliplo_gain = 0x2d; } else if (!ghz5 && dev->phy.rev >= 5) { + static const int gain_data[] = {0x0062, 0x0064, 0x006a, 0x106a, + 0x106c, 0x1074, 0x107c, 0x207c}; + u8 tr_iso = dev->dev->bus_sprom->fem.ghz2.tr_iso; + if (ext_lna) { e->rfseq_init[0] &= ~0x4000; e->rfseq_init[1] &= ~0x4000; @@ -3256,26 +3258,10 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( e->rfseq_init[3] &= ~0x4000; e->init_gain &= ~0x4000; } - switch (tr_iso) { - case 0: - e->cliplo_gain = 0x0062; - case 1: - e->cliplo_gain = 0x0064; - case 2: - e->cliplo_gain = 0x006a; - case 3: - e->cliplo_gain = 0x106a; - case 4: - e->cliplo_gain = 0x106c; - case 5: - e->cliplo_gain = 0x1074; - case 6: - e->cliplo_gain = 0x107c; - case 7: - e->cliplo_gain = 0x207c; - default: - e->cliplo_gain = 0x106a; - } + if (tr_iso > 7) + tr_iso = 3; + e->cliplo_gain = gain_data[tr_iso]; + } else if (ghz5 && dev->phy.rev == 4 && ext_lna) { e->rfseq_init[0] &= ~0x4000; e->rfseq_init[1] &= ~0x4000; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index 1a6661a9f008..756e19fc2795 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -26,6 +26,7 @@ brcmfmac-objs += \ wl_cfg80211.o \ fwil.o \ fweh.o \ + p2p.o \ dhd_cdc.o \ dhd_common.o \ dhd_linux.o @@ -37,4 +38,4 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ usb.o brcmfmac-$(CONFIG_BRCMDBG) += \ - dhd_dbg.o
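
The tables_nphy.c hunk above replaces a switch whose cases all fell through to the default assignment with a lookup table indexed by tr_iso plus an explicit range clamp. The same pattern as a standalone program (table values taken from the hunk, everything around them invented):

	#include <stdio.h>
	#include <stdint.h>

	/* cliplo_gain value per SPROM tr_iso setting, as in the hunk above */
	static const int gain_data[] = {
		0x0062, 0x0064, 0x006a, 0x106a,
		0x106c, 0x1074, 0x107c, 0x207c,
	};

	/* Out-of-range inputs are clamped to index 3, matching the old
	 * switch statement's default of 0x106a.
	 */
	static int cliplo_gain_for(uint8_t tr_iso)
	{
		if (tr_iso > 7)
			tr_iso = 3;
		return gain_data[tr_iso];
	}

	int main(void)
	{
		uint8_t i;

		for (i = 0; i < 10; i++)
			printf("tr_iso=%u -> cliplo_gain=0x%04x\n", i, cliplo_gain_for(i));
		return 0;
	}
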
\ No newline at end of file + dhd_dbg.o diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index be35a2f99b1c..11fd1c735589 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -15,8 +15,6 @@ */ /* ****************** SDIO CARD Interface Functions **************************/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/types.h> #include <linux/netdevice.h> #include <linux/export.h> diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index d33e5598611b..d92d373733d7 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -14,8 +14,6 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/types.h> #include <linux/netdevice.h> #include <linux/mmc/sdio.h> diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index fd672bf53867..ef6f23be6d32 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -39,6 +39,7 @@ #define BRCMF_C_GET_BSSID 23 #define BRCMF_C_GET_SSID 25 #define BRCMF_C_SET_SSID 26 +#define BRCMF_C_TERMINATED 28 #define BRCMF_C_GET_CHANNEL 29 #define BRCMF_C_SET_CHANNEL 30 #define BRCMF_C_GET_SRL 31 @@ -71,6 +72,7 @@ #define BRCMF_C_SET_WSEC 134 #define BRCMF_C_GET_PHY_NOISE 135 #define BRCMF_C_GET_BSS_INFO 136 +#define BRCMF_C_SET_SCB_TIMEOUT 158 #define BRCMF_C_GET_PHYLIST 180 #define BRCMF_C_SET_SCAN_CHANNEL_TIME 185 #define BRCMF_C_SET_SCAN_UNASSOC_TIME 187 @@ -148,6 +150,7 @@ #define BRCMF_E_REASON_MINTXRATE 9 #define BRCMF_E_REASON_TXFAIL 10 +#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4 #define BRCMF_E_REASON_FAST_ROAM_FAILED 5 #define BRCMF_E_REASON_DIRECTED_ROAM 6 #define BRCMF_E_REASON_TSPEC_REJECTED 7 @@ -374,6 +377,28 @@ struct brcmf_join_params { struct brcmf_assoc_params_le params_le; }; +/* scan params for extended join */ +struct brcmf_join_scan_params_le { + u8 scan_type; /* 0 use default, active or passive scan */ + __le32 nprobes; /* -1 use default, nr of probes per channel */ + __le32 active_time; /* -1 use default, dwell time per channel for + * active scanning + */ + __le32 passive_time; /* -1 use default, dwell time per channel + * for passive scanning + */ + __le32 home_time; /* -1 use default, dwell time for the home + * channel between channel scans + */ +}; + +/* extended join params */ +struct brcmf_ext_join_params_le { + struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */ + struct brcmf_join_scan_params_le scan_le; + struct brcmf_assoc_params_le assoc_le; +}; + struct brcmf_wsec_key { u32 index; /* key index */ u32 len; /* key length */ @@ -450,6 +475,19 @@ struct brcmf_sta_info_le { __le32 rx_decrypt_failures; /* # of packet decrypted failed */ }; +/* + * WLC_E_PROBRESP_MSG + * WLC_E_P2P_PROBREQ_MSG + * WLC_E_ACTION_FRAME_RX + */ +struct brcmf_rx_mgmt_data { + __be16 version; + __be16 chanspec; + __be32 rssi; + __be32 mactime; + __be32 rate; +}; + /* Bus independent dongle command */ struct brcmf_dcmd { uint cmd; /* common dongle cmd definition */ @@ -480,50 +518,20 @@ struct brcmf_pub { unsigned long drv_version; /* Version of dongle-resident driver */ u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */ - /* Additional stats for the bus level */ - /* Multicast data packets 
sent to dongle */ unsigned long tx_multicast; - /* Packets flushed due to unscheduled sendup thread */ - unsigned long rx_flushed; - /* Number of times dpc scheduled by watchdog timer */ - unsigned long wd_dpc_sched; - - /* Number of flow control pkts recvd */ - unsigned long fc_packets; - - /* Last error return */ - int bcmerror; - - /* Last error from dongle */ - int dongle_error; - - /* Suspend disable flag flag */ - int suspend_disable_flag; /* "1" to disable all extra powersaving - during suspend */ - int in_suspend; /* flag set to 1 when early suspend called */ - int dtim_skip; /* dtim skip , default 0 means wake each dtim */ struct brcmf_if *iflist[BRCMF_MAX_IFS]; struct mutex proto_block; unsigned char proto_buf[BRCMF_DCMD_MAXLEN]; - u8 macvalue[ETH_ALEN]; - atomic_t pend_8021x_cnt; - wait_queue_head_t pend_8021x_wait; - struct brcmf_fweh_info fweh; #ifdef DEBUG struct dentry *dbgfs_dir; #endif }; -struct bcmevent_name { - uint event; - const char *name; -}; - struct brcmf_if_event { u8 ifidx; u8 action; @@ -541,9 +549,11 @@ struct brcmf_cfg80211_vif; * @vif: points to cfg80211 specific interface information. * @ndev: associated network device. * @stats: interface specific network statistics. - * @idx: interface index in device firmware. + * @ifidx: interface index in device firmware. * @bssidx: index of bss associated with this interface. * @mac_addr: assigned mac address. + * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. + * @pend_8021x_wait: used for signalling change in count. */ struct brcmf_if { struct brcmf_pub *drvr; @@ -552,18 +562,13 @@ struct brcmf_if { struct net_device_stats stats; struct work_struct setmacaddr_work; struct work_struct multicast_work; - int idx; + int ifidx; s32 bssidx; u8 mac_addr[ETH_ALEN]; + atomic_t pend_8021x_cnt; + wait_queue_head_t pend_8021x_wait; }; -static inline s32 brcmf_ndev_bssidx(struct net_device *ndev) -{ - struct brcmf_if *ifp = netdev_priv(ndev); - return ifp->bssidx; -} - -extern const struct bcmevent_name bcmevent_names[]; extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); @@ -576,9 +581,14 @@ extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf, uint len); -extern int brcmf_net_attach(struct brcmf_if *ifp); -extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, - s32 bssidx, char *name, u8 *mac_addr); -extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx); +/* Remove any protocol-specific data header. 
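
The brcmf_if kernel-doc above pairs pend_8021x_cnt, an atomic count of in-flight 802.1x frames, with pend_8021x_wait, a wait queue signalled whenever the count changes. A rough userspace analogue of that count-and-wait pattern, using a pthread mutex and condition variable in place of kernel atomics and wait queues (all names invented):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Analogue of pend_8021x_cnt / pend_8021x_wait: the TX path bumps the
	 * count, the completion path drops it and wakes anyone waiting for zero.
	 */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  zero = PTHREAD_COND_INITIALIZER;
	static int pending;

	static void frame_sent(void)
	{
		pthread_mutex_lock(&lock);
		pending++;
		pthread_mutex_unlock(&lock);
	}

	static void frame_completed(void)
	{
		pthread_mutex_lock(&lock);
		if (--pending == 0)
			pthread_cond_broadcast(&zero);
		pthread_mutex_unlock(&lock);
	}

	static void wait_pending(void)
	{
		pthread_mutex_lock(&lock);
		while (pending)
			pthread_cond_wait(&zero, &lock);
		pthread_mutex_unlock(&lock);
	}

	static void *completer(void *arg)
	{
		(void)arg;
		usleep(1000);
		frame_completed();
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		frame_sent();
		pthread_create(&t, NULL, completer, NULL);
		wait_pending();			/* returns once the frame completed */
		pthread_join(t, NULL);
		printf("no 802.1x frames pending\n");
		return 0;
	}
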
*/ +extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx, + struct sk_buff *rxp); + +extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); +extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, + s32 ifidx, char *name, u8 *mac_addr); +extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx); +extern u32 brcmf_get_chip_info(struct brcmf_if *ifp); #endif /* _BRCMF_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index dd38b78a9726..ad25c3408b59 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h @@ -24,18 +24,6 @@ enum brcmf_bus_state { BRCMF_BUS_DATA /* Ready for frame transfers */ }; -struct dngl_stats { - unsigned long rx_packets; /* total packets received */ - unsigned long tx_packets; /* total packets transmitted */ - unsigned long rx_bytes; /* total bytes received */ - unsigned long tx_bytes; /* total bytes transmitted */ - unsigned long rx_errors; /* bad packets received */ - unsigned long tx_errors; /* packet transmit problems */ - unsigned long rx_dropped; /* packets dropped by dongle */ - unsigned long tx_dropped; /* packets dropped by dongle */ - unsigned long multicast; /* multicast packets received */ -}; - struct brcmf_bus_dcmd { char *name; char *param; @@ -72,11 +60,12 @@ struct brcmf_bus_ops { * @drvr: public driver information. * @state: operational state of the bus interface. * @maxctl: maximum size for rxctl request message. - * @drvr_up: indicates driver up/down status. * @tx_realloc: number of tx packets realloced for headroom. * @dstats: dongle-based statistical data. * @align: alignment requirement for the bus. * @dcmd_list: bus/device specific dongle initialization commands. + * @chip: device identifier of the dongle chip. + * @chiprev: revision of the dongle chip. */ struct brcmf_bus { union { @@ -87,10 +76,10 @@ struct brcmf_bus { struct brcmf_pub *drvr; enum brcmf_bus_state state; uint maxctl; - bool drvr_up; unsigned long tx_realloc; - struct dngl_stats dstats; u8 align; + u32 chip; + u32 chiprev; struct list_head dcmd_list; struct brcmf_bus_ops *ops; @@ -130,31 +119,18 @@ int brcmf_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint len) * interface functions from common layer */ -/* Remove any protocol-specific data header. */ -extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx, - struct sk_buff *rxp); - extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt, int prec); /* Receive frame for delivery to OS. Callee disposes of rxp. */ -extern void brcmf_rx_frame(struct device *dev, u8 ifidx, - struct sk_buff_head *rxlist); -static inline void brcmf_rx_packet(struct device *dev, int ifidx, - struct sk_buff *pkt) -{ - struct sk_buff_head q; - - skb_queue_head_init(&q); - skb_queue_tail(&q, pkt); - brcmf_rx_frame(dev, ifidx, &q); -} +extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist); /* Indication from bus module regarding presence/insertion of dongle. 
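
With the brcmf_rx_packet() wrapper gone, dhd_bus.h above leaves only brcmf_rx_frames(), which takes a whole sk_buff_head, so a bus driver queues received frames locally and hands the batch to the common layer in one call. A toy version of that batch handoff, with a plain singly linked list standing in for sk_buff_head (names invented):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for an sk_buff: just a payload length here. */
	struct frame {
		struct frame *next;
		int len;
	};

	struct frame_list {
		struct frame *head, *tail;
	};

	static void list_add_tail(struct frame_list *q, struct frame *f)
	{
		f->next = NULL;
		if (q->tail)
			q->tail->next = f;
		else
			q->head = f;
		q->tail = f;
	}

	/* Consumer takes the whole batch at once, like brcmf_rx_frames(). */
	static void rx_frames(struct frame_list *q)
	{
		struct frame *f;

		while ((f = q->head) != NULL) {
			q->head = f->next;
			printf("delivering %d bytes\n", f->len);
			free(f);
		}
		q->tail = NULL;
	}

	int main(void)
	{
		struct frame_list q = { NULL, NULL };
		int i;

		/* bus-driver side: collect a few frames, then one handoff */
		for (i = 0; i < 3; i++) {
			struct frame *f = malloc(sizeof(*f));
			if (!f)
				return 1;
			f->len = 64 * (i + 1);
			list_add_tail(&q, f);
		}
		rx_frames(&q);
		return 0;
	}
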
*/ extern int brcmf_attach(uint bus_hdrlen, struct device *dev); /* Indication from bus module regarding removal/absence of dongle */ extern void brcmf_detach(struct device *dev); - +/* Indication from bus module that dongle should be reset */ +extern void brcmf_dev_reset(struct device *dev); /* Indication from bus module to change flow-control state */ extern void brcmf_txflowblock(struct device *dev, bool state); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c index 83923553f1ac..a2354d951dd7 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c @@ -19,8 +19,6 @@ * For certain dcmd codes, the dongle interprets string data from the host. ******************************************************************************/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/types.h> #include <linux/netdevice.h> @@ -94,8 +92,6 @@ struct brcmf_proto_bdc_header { struct brcmf_proto { u16 reqid; - u8 pending; - u32 lastcmd; u8 bus_header[BUS_HEADER_LEN]; struct brcmf_proto_cdc_dcmd msg; unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN]; @@ -107,7 +103,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr) int len = le32_to_cpu(prot->msg.len) + sizeof(struct brcmf_proto_cdc_dcmd); - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(CDC, "Enter\n"); /* NOTE : cdc->msg.len holds the desired length of the buffer to be * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area @@ -125,7 +121,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len) int ret; struct brcmf_proto *prot = drvr->prot; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(CDC, "Enter\n"); len += sizeof(struct brcmf_proto_cdc_dcmd); do { ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg, @@ -147,20 +143,7 @@ brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, int ret = 0, retries = 0; u32 id, flags; - brcmf_dbg(TRACE, "Enter\n"); - brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len); - - /* Respond "bcmerror" and "bcmerrorstr" with local cache */ - if (cmd == BRCMF_C_GET_VAR && buf) { - if (!strcmp((char *)buf, "bcmerrorstr")) { - strncpy((char *)buf, "bcm_error", - BCME_STRLEN); - goto done; - } else if (!strcmp((char *)buf, "bcmerror")) { - *(int *)buf = drvr->dongle_error; - goto done; - } - } + brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len); memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd)); @@ -210,11 +193,8 @@ retry: } /* Check the ERROR flag */ - if (flags & CDC_DCMD_ERROR) { + if (flags & CDC_DCMD_ERROR) ret = le32_to_cpu(msg->status); - /* Cache error from dongle */ - drvr->dongle_error = ret; - } done: return ret; @@ -228,8 +208,7 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, int ret = 0; u32 flags, id; - brcmf_dbg(TRACE, "Enter\n"); - brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len); + brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len); memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd)); @@ -262,11 +241,8 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, } /* Check the ERROR flag */ - if (flags & CDC_DCMD_ERROR) { + if (flags & CDC_DCMD_ERROR) ret = le32_to_cpu(msg->status); - /* Cache error from dongle */ - drvr->dongle_error = ret; - } done: return ret; @@ -287,7 +263,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, { struct brcmf_proto_bdc_header *h; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(CDC, "Enter\n"); /* Push BDC header used to convey priority for buses 
that don't */ @@ -305,14 +281,12 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, BDC_SET_IF_IDX(h, ifidx); } -int brcmf_proto_hdrpull(struct device *dev, int *ifidx, +int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx, struct sk_buff *pktbuf) { struct brcmf_proto_bdc_header *h; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(CDC, "Enter\n"); /* Pop BDC header used to convey priority for buses that don't */ @@ -329,6 +303,14 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx, brcmf_err("rx data ifnum out of range (%d)\n", *ifidx); return -EBADE; } + /* The ifidx is the idx to map to matching netdev/ifp. When receiving + * events this is easy because it contains the bssidx which maps + * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd. + * bssidx 1 is used for p2p0 and no data can be received or + * transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0 + */ + if (*ifidx) + (*ifidx)++; if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { @@ -338,7 +320,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx, } if (h->flags & BDC_FLAG_SUM_GOOD) { - brcmf_dbg(INFO, "%s: BDC packet received with good rx-csum, flags 0x%x\n", + brcmf_dbg(CDC, "%s: BDC rcv, good checksum, flags 0x%x\n", brcmf_ifname(drvr, *ifidx), h->flags); pkt_set_sum_good(pktbuf, true); } @@ -348,6 +330,8 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx, skb_pull(pktbuf, BDC_HEADER_LEN); skb_pull(pktbuf, h->data_offset << 2); + if (pktbuf->len == 0) + return -ENODATA; return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index f8b52e5b941a..4544342a0428 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -14,8 +14,6 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h index f2ab01cd7966..bc013cbe06f6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h @@ -18,21 +18,26 @@ #define _BRCMF_DBG_H_ /* message levels */ -#define BRCMF_TRACE_VAL 0x0002 -#define BRCMF_INFO_VAL 0x0004 -#define BRCMF_DATA_VAL 0x0008 -#define BRCMF_CTL_VAL 0x0010 -#define BRCMF_TIMER_VAL 0x0020 -#define BRCMF_HDRS_VAL 0x0040 -#define BRCMF_BYTES_VAL 0x0080 -#define BRCMF_INTR_VAL 0x0100 -#define BRCMF_GLOM_VAL 0x0200 -#define BRCMF_EVENT_VAL 0x0400 -#define BRCMF_BTA_VAL 0x0800 -#define BRCMF_FIL_VAL 0x1000 -#define BRCMF_USB_VAL 0x2000 -#define BRCMF_SCAN_VAL 0x4000 -#define BRCMF_CONN_VAL 0x8000 +#define BRCMF_TRACE_VAL 0x00000002 +#define BRCMF_INFO_VAL 0x00000004 +#define BRCMF_DATA_VAL 0x00000008 +#define BRCMF_CTL_VAL 0x00000010 +#define BRCMF_TIMER_VAL 0x00000020 +#define BRCMF_HDRS_VAL 0x00000040 +#define BRCMF_BYTES_VAL 0x00000080 +#define BRCMF_INTR_VAL 0x00000100 +#define BRCMF_GLOM_VAL 0x00000200 +#define BRCMF_EVENT_VAL 0x00000400 +#define BRCMF_BTA_VAL 0x00000800 +#define BRCMF_FIL_VAL 0x00001000 +#define BRCMF_USB_VAL 0x00002000 +#define BRCMF_SCAN_VAL 0x00004000 +#define BRCMF_CONN_VAL 0x00008000 +#define BRCMF_CDC_VAL 0x00010000 + +/* set default print format */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Macro for error messages. net_ratelimit() is used when driver * debugging is not selected. When debugging the driver error diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 74a616b4de8e..c06cea88df0d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -14,8 +14,6 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> #include <linux/etherdevice.h> #include <linux/module.h> @@ -28,6 +26,8 @@ #include "dhd_bus.h" #include "dhd_proto.h" #include "dhd_dbg.h" +#include "fwil_types.h" +#include "p2p.h" #include "wl_cfg80211.h" #include "fwil.h" @@ -42,6 +42,12 @@ MODULE_LICENSE("Dual BSD/GPL"); int brcmf_msg_level; module_param(brcmf_msg_level, int, 0); +/* P2P0 enable */ +static int brcmf_p2p_enable; +#ifdef CONFIG_BRCMDBG +module_param_named(p2pon, brcmf_p2p_enable, int, 0); +MODULE_PARM_DESC(p2pon, "enable p2p management functionality"); +#endif char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx) { @@ -72,9 +78,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work) u32 buflen; s32 err; - brcmf_dbg(TRACE, "enter\n"); - ifp = container_of(work, struct brcmf_if, multicast_work); + + brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + ndev = ifp->ndev; /* Determine initial value of allmulti flag */ @@ -131,9 +138,10 @@ _brcmf_set_mac_address(struct work_struct *work) struct brcmf_if *ifp; s32 err; - brcmf_dbg(TRACE, "enter\n"); - ifp = container_of(work, struct brcmf_if, setmacaddr_work); + + brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr, ETH_ALEN); if (err < 0) { @@ -162,28 +170,31 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev) schedule_work(&ifp->multicast_work); } -static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, + struct net_device *ndev) { int ret; struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pub *drvr = ifp->drvr; + struct ethhdr *eh; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); - /* Reject if down */ - if (!drvr->bus_if->drvr_up || - (drvr->bus_if->state != BRCMF_BUS_DATA)) { - brcmf_err("xmit rejected drvup=%d state=%d\n", - drvr->bus_if->drvr_up, - drvr->bus_if->state); + /* Can the device send data? 
*/ + if (drvr->bus_if->state != BRCMF_BUS_DATA) { + brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state); netif_stop_queue(ndev); - return -ENODEV; + dev_kfree_skb(skb); + ret = -ENODEV; + goto done; } - if (!drvr->iflist[ifp->idx]) { - brcmf_err("bad ifidx %d\n", ifp->idx); + if (!drvr->iflist[ifp->bssidx]) { + brcmf_err("bad ifidx %d\n", ifp->bssidx); netif_stop_queue(ndev); - return -ENODEV; + dev_kfree_skb(skb); + ret = -ENODEV; + goto done; } /* Make sure there's enough room for any header */ @@ -191,44 +202,49 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct sk_buff *skb2; brcmf_dbg(INFO, "%s: insufficient headroom\n", - brcmf_ifname(drvr, ifp->idx)); + brcmf_ifname(drvr, ifp->bssidx)); drvr->bus_if->tx_realloc++; skb2 = skb_realloc_headroom(skb, drvr->hdrlen); dev_kfree_skb(skb); skb = skb2; if (skb == NULL) { brcmf_err("%s: skb_realloc_headroom failed\n", - brcmf_ifname(drvr, ifp->idx)); + brcmf_ifname(drvr, ifp->bssidx)); ret = -ENOMEM; goto done; } } - /* Update multicast statistic */ - if (skb->len >= ETH_ALEN) { - u8 *pktdata = (u8 *)(skb->data); - struct ethhdr *eh = (struct ethhdr *)pktdata; - - if (is_multicast_ether_addr(eh->h_dest)) - drvr->tx_multicast++; - if (ntohs(eh->h_proto) == ETH_P_PAE) - atomic_inc(&drvr->pend_8021x_cnt); + /* validate length for ether packet */ + if (skb->len < sizeof(*eh)) { + ret = -EINVAL; + dev_kfree_skb(skb); + goto done; } + /* handle ethernet header */ + eh = (struct ethhdr *)(skb->data); + if (is_multicast_ether_addr(eh->h_dest)) + drvr->tx_multicast++; + if (ntohs(eh->h_proto) == ETH_P_PAE) + atomic_inc(&ifp->pend_8021x_cnt); + /* If the protocol uses a data header, apply it */ - brcmf_proto_hdrpush(drvr, ifp->idx, skb); + brcmf_proto_hdrpush(drvr, ifp->ifidx, skb); /* Use bus module to send data frame */ ret = brcmf_bus_txdata(drvr->bus_if, skb); done: - if (ret) - drvr->bus_if->dstats.tx_dropped++; - else - drvr->bus_if->dstats.tx_packets++; + if (ret) { + ifp->stats.tx_dropped++; + } else { + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += skb->len; + } /* Return ok: we always eat the packet */ - return 0; + return NETDEV_TX_OK; } void brcmf_txflowblock(struct device *dev, bool state) @@ -250,8 +266,7 @@ void brcmf_txflowblock(struct device *dev, bool state) } } -void brcmf_rx_frame(struct device *dev, u8 ifidx, - struct sk_buff_head *skb_list) +void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) { unsigned char *eth; uint len; @@ -259,12 +274,25 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx, struct brcmf_if *ifp; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; + u8 ifidx; + int ret; brcmf_dbg(TRACE, "Enter\n"); skb_queue_walk_safe(skb_list, skb, pnext) { skb_unlink(skb, skb_list); + /* process and remove protocol-specific header */ + ret = brcmf_proto_hdrpull(drvr, &ifidx, skb); + ifp = drvr->iflist[ifidx]; + + if (ret || !ifp || !ifp->ndev) { + if ((ret != -ENODATA) && ifp) + ifp->stats.rx_errors++; + brcmu_pkt_buf_free_skb(skb); + continue; + } + /* Get the protocol, maintain skb around eth_type_trans() * The main reason for this hack is for the limitation of * Linux 2.4 where 'eth_type_trans' uses the @@ -280,21 +308,11 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx, eth = skb->data; len = skb->len; - ifp = drvr->iflist[ifidx]; - if (ifp == NULL) - ifp = drvr->iflist[0]; - - if (!ifp || !ifp->ndev || - ifp->ndev->reg_state != NETREG_REGISTERED) { - brcmu_pkt_buf_free_skb(skb); - continue; - } - skb->dev = 
ifp->ndev; skb->protocol = eth_type_trans(skb, skb->dev); if (skb->pkt_type == PACKET_MULTICAST) - bus_if->dstats.multicast++; + ifp->stats.multicast++; skb->data = eth; skb->len = len; @@ -310,8 +328,13 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx, ifp->ndev->last_rx = jiffies; } - bus_if->dstats.rx_bytes += skb->len; - bus_if->dstats.rx_packets++; /* Local count */ + if (!(ifp->ndev->flags & IFF_UP)) { + brcmu_pkt_buf_free_skb(skb); + continue; + } + + ifp->stats.rx_bytes += skb->len; + ifp->stats.rx_packets++; if (in_interrupt()) netif_rx(skb); @@ -328,41 +351,36 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx, void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success) { - uint ifidx; + u8 ifidx; struct ethhdr *eh; u16 type; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_if *ifp; + + brcmf_proto_hdrpull(drvr, &ifidx, txp); - brcmf_proto_hdrpull(dev, &ifidx, txp); + ifp = drvr->iflist[ifidx]; + if (!ifp) + return; eh = (struct ethhdr *)(txp->data); type = ntohs(eh->h_proto); if (type == ETH_P_PAE) { - atomic_dec(&drvr->pend_8021x_cnt); - if (waitqueue_active(&drvr->pend_8021x_wait)) - wake_up(&drvr->pend_8021x_wait); + atomic_dec(&ifp->pend_8021x_cnt); + if (waitqueue_active(&ifp->pend_8021x_wait)) + wake_up(&ifp->pend_8021x_wait); } + if (!success) + ifp->stats.tx_errors++; } static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_bus *bus_if = ifp->drvr->bus_if; - brcmf_dbg(TRACE, "Enter\n"); - - /* Copy dongle stats to net device stats */ - ifp->stats.rx_packets = bus_if->dstats.rx_packets; - ifp->stats.tx_packets = bus_if->dstats.tx_packets; - ifp->stats.rx_bytes = bus_if->dstats.rx_bytes; - ifp->stats.tx_bytes = bus_if->dstats.tx_bytes; - ifp->stats.rx_errors = bus_if->dstats.rx_errors; - ifp->stats.tx_errors = bus_if->dstats.tx_errors; - ifp->stats.rx_dropped = bus_if->dstats.rx_dropped; - ifp->stats.tx_dropped = bus_if->dstats.tx_dropped; - ifp->stats.multicast = bus_if->dstats.multicast; + brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); return &ifp->stats; } @@ -395,9 +413,11 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev, struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pub *drvr = ifp->drvr; - sprintf(info->driver, KBUILD_MODNAME); - sprintf(info->version, "%lu", drvr->drv_version); - sprintf(info->bus_info, "%s", dev_name(drvr->bus_if->dev)); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + snprintf(info->version, sizeof(info->version), "%lu", + drvr->drv_version); + strlcpy(info->bus_info, dev_name(drvr->bus_if->dev), + sizeof(info->bus_info)); } static const struct ethtool_ops brcmf_ethtool_ops = { @@ -414,7 +434,7 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr) u32 toe_cmpnt, csum_dir; int ret; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); /* all ethtool calls start with a cmd word */ if (copy_from_user(&cmd, uaddr, sizeof(u32))) @@ -437,20 +457,14 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr) sprintf(info.driver, "dhd"); strcpy(info.version, BRCMF_VERSION_STR); } - - /* otherwise, require dongle to be up */ - else if (!drvr->bus_if->drvr_up) { - brcmf_err("dongle is not up\n"); - return -ENODEV; - } - /* finally, report dongle driver type */ + /* report dongle driver type */ else sprintf(info.driver, "wl"); sprintf(info.version, "%lu", drvr->drv_version); if (copy_to_user(uaddr, &info, 
sizeof(info))) return -EFAULT; - brcmf_dbg(CTL, "given %*s, returning %s\n", + brcmf_dbg(TRACE, "given %*s, returning %s\n", (int)sizeof(drvname), drvname, info.driver); break; @@ -517,9 +531,9 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr, struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pub *drvr = ifp->drvr; - brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd); + brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd); - if (!drvr->iflist[ifp->idx]) + if (!drvr->iflist[ifp->bssidx]) return -1; if (cmd == SIOCETHTOOL) @@ -531,17 +545,12 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr, static int brcmf_netdev_stop(struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; - - brcmf_dbg(TRACE, "Enter\n"); - if (drvr->bus_if->drvr_up == 0) - return 0; + brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); brcmf_cfg80211_down(ndev); /* Set state and stop OS transmissions */ - drvr->bus_if->drvr_up = false; netif_stop_queue(ndev); return 0; @@ -555,7 +564,7 @@ static int brcmf_netdev_open(struct net_device *ndev) u32 toe_ol; s32 ret = 0; - brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); + brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); /* If bus is not ready, can't continue */ if (bus_if->state != BRCMF_BUS_DATA) { @@ -563,25 +572,17 @@ static int brcmf_netdev_open(struct net_device *ndev) return -EAGAIN; } - atomic_set(&drvr->pend_8021x_cnt, 0); - - memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN); + atomic_set(&ifp->pend_8021x_cnt, 0); /* Get current TOE mode from dongle */ if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) - drvr->iflist[ifp->idx]->ndev->features |= - NETIF_F_IP_CSUM; + ndev->features |= NETIF_F_IP_CSUM; else - drvr->iflist[ifp->idx]->ndev->features &= - ~NETIF_F_IP_CSUM; - - /* make sure RF is ready for work */ - brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); + ndev->features &= ~NETIF_F_IP_CSUM; /* Allow transmit calls */ netif_start_queue(ndev); - drvr->bus_if->drvr_up = true; if (brcmf_cfg80211_up(ndev)) { brcmf_err("failed to bring up cfg80211\n"); return -1; @@ -600,29 +601,18 @@ static const struct net_device_ops brcmf_netdev_ops_pri = { .ndo_set_rx_mode = brcmf_netdev_set_multicast_list }; -static const struct net_device_ops brcmf_netdev_ops_virt = { - .ndo_open = brcmf_cfg80211_up, - .ndo_stop = brcmf_cfg80211_down, - .ndo_get_stats = brcmf_netdev_get_stats, - .ndo_do_ioctl = brcmf_netdev_ioctl_entry, - .ndo_start_xmit = brcmf_netdev_start_xmit, - .ndo_set_mac_address = brcmf_netdev_set_mac_address, - .ndo_set_rx_mode = brcmf_netdev_set_multicast_list -}; - -int brcmf_net_attach(struct brcmf_if *ifp) +int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) { struct brcmf_pub *drvr = ifp->drvr; struct net_device *ndev; + s32 err; - brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr); + brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx, + ifp->mac_addr); ndev = ifp->ndev; /* set appropriate operations */ - if (!ifp->idx) - ndev->netdev_ops = &brcmf_netdev_ops_pri; - else - ndev->netdev_ops = &brcmf_netdev_ops_virt; + ndev->netdev_ops = &brcmf_netdev_ops_pri; ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; ndev->ethtool_ops = &brcmf_ethtool_ops; @@ -633,7 +623,14 @@ int brcmf_net_attach(struct brcmf_if *ifp) /* set the mac address */ memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN); - if (register_netdev(ndev) != 0) { + INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address); + 
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list); + + if (rtnl_locked) + err = register_netdevice(ndev); + else + err = register_netdev(ndev); + if (err != 0) { brcmf_err("couldn't register the net device\n"); goto fail; } @@ -647,16 +644,78 @@ fail: return -EBADE; } -struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx, - char *name, u8 *addr_mask) +static int brcmf_net_p2p_open(struct net_device *ndev) +{ + brcmf_dbg(TRACE, "Enter\n"); + + return brcmf_cfg80211_up(ndev); +} + +static int brcmf_net_p2p_stop(struct net_device *ndev) +{ + brcmf_dbg(TRACE, "Enter\n"); + + return brcmf_cfg80211_down(ndev); +} + +static int brcmf_net_p2p_do_ioctl(struct net_device *ndev, + struct ifreq *ifr, int cmd) +{ + brcmf_dbg(TRACE, "Enter\n"); + return 0; +} + +static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + if (skb) + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops brcmf_netdev_ops_p2p = { + .ndo_open = brcmf_net_p2p_open, + .ndo_stop = brcmf_net_p2p_stop, + .ndo_do_ioctl = brcmf_net_p2p_do_ioctl, + .ndo_start_xmit = brcmf_net_p2p_start_xmit +}; + +static int brcmf_net_p2p_attach(struct brcmf_if *ifp) +{ + struct net_device *ndev; + + brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx, + ifp->mac_addr); + ndev = ifp->ndev; + + ndev->netdev_ops = &brcmf_netdev_ops_p2p; + + /* set the mac address */ + memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN); + + if (register_netdev(ndev) != 0) { + brcmf_err("couldn't register the p2p net device\n"); + goto fail; + } + + brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name); + + return 0; + +fail: + return -EBADE; +} + +struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, + char *name, u8 *mac_addr) { struct brcmf_if *ifp; struct net_device *ndev; - int i; - brcmf_dbg(TRACE, "idx %d\n", ifidx); + brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx); - ifp = drvr->iflist[ifidx]; + ifp = drvr->iflist[bssidx]; /* * Delete the existing interface before overwriting it * in case we missed the BRCMF_E_IF_DEL event. 
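The brcmf_add_if()/brcmf_del_if() rework above keys drvr->iflist[] by bssidx, while data frames from the dongle still carry a raw ifidx; the reworked brcmf_proto_hdrpull() therefore bumps any non-zero ifidx by one, because bssidx 1 is reserved for the p2p0 discovery interface and never carries data. A minimal sketch of that mapping, using a hypothetical helper name that is not part of the patch:

#include <linux/types.h>

/* Map the ifidx reported with a data frame to the bssidx used to index
 * drvr->iflist[]. Mirrors the adjustment done in brcmf_proto_hdrpull():
 * ifidx 0 is the primary interface; everything else sits one slot higher
 * because bssidx 1 (p2p0) never carries data frames.
 */
static inline s32 brcmf_data_ifidx_to_bssidx(u8 ifidx)
{
	return ifidx ? ifidx + 1 : 0;
}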
@@ -668,7 +727,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx, netif_stop_queue(ifp->ndev); unregister_netdev(ifp->ndev); free_netdev(ifp->ndev); - drvr->iflist[ifidx] = NULL; + drvr->iflist[bssidx] = NULL; } else { brcmf_err("ignore IF event\n"); return ERR_PTR(-EINVAL); @@ -685,16 +744,15 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx, ifp = netdev_priv(ndev); ifp->ndev = ndev; ifp->drvr = drvr; - drvr->iflist[ifidx] = ifp; - ifp->idx = ifidx; + drvr->iflist[bssidx] = ifp; + ifp->ifidx = ifidx; ifp->bssidx = bssidx; - INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address); - INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list); - if (addr_mask != NULL) - for (i = 0; i < ETH_ALEN; i++) - ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i]; + init_waitqueue_head(&ifp->pend_8021x_wait); + + if (mac_addr != NULL) + memcpy(ifp->mac_addr, mac_addr, ETH_ALEN); brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n", current->pid, ifp->ndev->name, ifp->mac_addr); @@ -702,19 +760,18 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx, return ifp; } -void brcmf_del_if(struct brcmf_pub *drvr, int ifidx) +void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) { struct brcmf_if *ifp; - brcmf_dbg(TRACE, "idx %d\n", ifidx); - - ifp = drvr->iflist[ifidx]; + ifp = drvr->iflist[bssidx]; if (!ifp) { - brcmf_err("Null interface\n"); + brcmf_err("Null interface, idx=%d\n", bssidx); return; } + brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx); if (ifp->ndev) { - if (ifidx == 0) { + if (bssidx == 0) { if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { rtnl_lock(); brcmf_netdev_stop(ifp->ndev); @@ -724,12 +781,14 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx) netif_stop_queue(ifp->ndev); } - cancel_work_sync(&ifp->setmacaddr_work); - cancel_work_sync(&ifp->multicast_work); + if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { + cancel_work_sync(&ifp->setmacaddr_work); + cancel_work_sync(&ifp->multicast_work); + } unregister_netdev(ifp->ndev); - drvr->iflist[ifidx] = NULL; - if (ifidx == 0) + drvr->iflist[bssidx] = NULL; + if (bssidx == 0) brcmf_cfg80211_detach(drvr->config); free_netdev(ifp->ndev); } @@ -769,8 +828,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev) INIT_LIST_HEAD(&drvr->bus_if->dcmd_list); - init_waitqueue_head(&drvr->pend_8021x_wait); - return ret; fail: @@ -785,6 +842,7 @@ int brcmf_bus_start(struct device *dev) struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; struct brcmf_if *ifp; + struct brcmf_if *p2p_ifp; brcmf_dbg(TRACE, "\n"); @@ -800,6 +858,13 @@ int brcmf_bus_start(struct device *dev) if (IS_ERR(ifp)) return PTR_ERR(ifp); + if (brcmf_p2p_enable) + p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL); + else + p2p_ifp = NULL; + if (IS_ERR(p2p_ifp)) + p2p_ifp = NULL; + /* signal bus ready */ bus_if->state = BRCMF_BUS_DATA; @@ -818,16 +883,22 @@ int brcmf_bus_start(struct device *dev) if (ret < 0) goto fail; - ret = brcmf_net_attach(ifp); + ret = brcmf_net_attach(ifp, false); fail: if (ret < 0) { brcmf_err("failed: %d\n", ret); if (drvr->config) brcmf_cfg80211_detach(drvr->config); - free_netdev(drvr->iflist[0]->ndev); + free_netdev(ifp->ndev); drvr->iflist[0] = NULL; + if (p2p_ifp) { + free_netdev(p2p_ifp->ndev); + drvr->iflist[1] = NULL; + } return ret; } + if ((brcmf_p2p_enable) && (p2p_ifp)) + brcmf_net_p2p_attach(p2p_ifp); return 0; } @@ -845,9 +916,21 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr) } } 
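The additions that follow also export the dongle identity: the bus code fills the new bus->chip and bus->chiprev fields (in brcmf_sdbrcm_probe() for SDIO), and brcmf_get_chip_info() returns them to callers packed into a single u32 as chip << 4 | chiprev. A small illustrative helper, not part of the patch, showing how such a value unpacks again under that encoding:

#include <linux/types.h>

/* Undo the packing used by brcmf_get_chip_info(): the chip id lives in the
 * upper bits and the revision in the low nibble, so this assumes the
 * revision fits in 4 bits, as the encoding implies.
 */
static inline void brcmf_chip_info_unpack(u32 chipinfo, u32 *chip, u32 *chiprev)
{
	*chip = chipinfo >> 4;
	*chiprev = chipinfo & 0xF;
}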
+void brcmf_dev_reset(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; + + if (drvr == NULL) + return; + + if (drvr->iflist[0]) + brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1); +} + void brcmf_detach(struct device *dev) { - int i; + s32 i; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; @@ -866,28 +949,26 @@ void brcmf_detach(struct device *dev) brcmf_bus_detach(drvr); - if (drvr->prot) { + if (drvr->prot) brcmf_proto_detach(drvr); - } brcmf_debugfs_detach(drvr); bus_if->drvr = NULL; kfree(drvr); } -static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr) +static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp) { - return atomic_read(&drvr->pend_8021x_cnt); + return atomic_read(&ifp->pend_8021x_cnt); } int brcmf_netdev_wait_pend8021x(struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_pub *drvr = ifp->drvr; int err; - err = wait_event_timeout(drvr->pend_8021x_wait, - !brcmf_get_pend_8021x_cnt(drvr), + err = wait_event_timeout(ifp->pend_8021x_wait, + !brcmf_get_pend_8021x_cnt(ifp), msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX)); WARN_ON(!err); @@ -895,6 +976,16 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev) return !err; } +/* + * return chip id and rev of the device encoded in u32. + */ +u32 brcmf_get_chip_info(struct brcmf_if *ifp) +{ + struct brcmf_bus *bus = ifp->drvr->bus_if; + + return bus->chip << 4 | bus->chiprev; +} + static void brcmf_driver_init(struct work_struct *work) { brcmf_debugfs_init(); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index cf857f1edf8c..4469321c0eb3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -14,8 +14,6 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/types.h> #include <linux/kernel.h> #include <linux/kthread.h> @@ -1098,7 +1096,6 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL && type != BRCMF_SDIO_FT_SUPER) { brcmf_err("HW header length too long\n"); - bus->sdiodev->bus_if->dstats.rx_errors++; bus->sdcnt.rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); rd->len = 0; @@ -1169,7 +1166,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) int errcode; u8 doff, sfdoff; - int ifidx = 0; bool usechain = bus->use_rxchain; struct brcmf_sdio_read rd_new; @@ -1301,7 +1297,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) if (errcode < 0) { brcmf_err("glom read of %d bytes failed: %d\n", dlen, errcode); - bus->sdiodev->bus_if->dstats.rx_errors++; sdio_claim_host(bus->sdiodev->func[1]); if (bus->glomerr++ < 3) { @@ -1388,13 +1383,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) skb_unlink(pfirst, &bus->glom); brcmu_pkt_buf_free_skb(pfirst); continue; - } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, - &ifidx, pfirst) != 0) { - brcmf_err("rx protocol error\n"); - bus->sdiodev->bus_if->dstats.rx_errors++; - skb_unlink(pfirst, &bus->glom); - brcmu_pkt_buf_free_skb(pfirst); - continue; } brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), @@ -1407,7 +1395,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } /* sent any remaining packets up */ if (bus->glom.qlen) - brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom); + brcmf_rx_frames(bus->sdiodev->dev, &bus->glom); bus->sdcnt.rxglomframes++; bus->sdcnt.rxglompkts += bus->glom.qlen; @@ -1455,10 +1443,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) if (bus->rxblen) buf = vzalloc(bus->rxblen); - if (!buf) { - brcmf_err("no memory for control frame\n"); + if (!buf) goto done; - } + rbuf = bus->rxbuf; pad = ((unsigned long)rbuf % BRCMF_SDALIGN); if (pad) @@ -1488,7 +1475,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) { brcmf_err("%d-byte control read exceeds %d-byte buffer\n", rdlen, bus->sdiodev->bus_if->maxctl); - bus->sdiodev->bus_if->dstats.rx_errors++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; } @@ -1496,7 +1482,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) if ((len - doff) > bus->sdiodev->bus_if->maxctl) { brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", len, len - doff, bus->sdiodev->bus_if->maxctl); - bus->sdiodev->bus_if->dstats.rx_errors++; bus->sdcnt.rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; @@ -1558,10 +1543,10 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen) static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) { struct sk_buff *pkt; /* Packet for event or data frames */ + struct sk_buff_head pktlist; /* needed for bus interface */ u16 pad; /* Number of pad bytes to read */ uint rxleft = 0; /* Remaining number of frames allowed */ int sdret; /* Return code from calls */ - int ifidx = 0; uint rxcount = 0; /* Total frames read */ struct brcmf_sdio_read *rd = &bus->cur_read, rd_new; u8 head_read = 0; @@ -1644,7 +1629,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) if (!pkt) { /* Give up on data, request rtx of events */ brcmf_err("brcmu_pkt_buf_get_skb failed\n"); - 
bus->sdiodev->bus_if->dstats.rx_dropped++; brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(rd->channel)); sdio_release_host(bus->sdiodev->func[1]); @@ -1662,7 +1646,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) brcmf_err("read %d bytes from channel %d failed: %d\n", rd->len, rd->channel, sdret); brcmu_pkt_buf_free_skb(pkt); - bus->sdiodev->bus_if->dstats.rx_errors++; sdio_claim_host(bus->sdiodev->func[1]); brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(rd->channel)); @@ -1760,15 +1743,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) if (pkt->len == 0) { brcmu_pkt_buf_free_skb(pkt); continue; - } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx, - pkt) != 0) { - brcmf_err("rx protocol error\n"); - brcmu_pkt_buf_free_skb(pkt); - bus->sdiodev->bus_if->dstats.rx_errors++; - continue; } - brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt); + skb_queue_head_init(&pktlist); + skb_queue_tail(&pktlist, pkt); + brcmf_rx_frames(bus->sdiodev->dev, &pktlist); } rxcount = maxframes - rxleft; @@ -1954,10 +1933,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) datalen = pkt->len - SDPCM_HDRLEN; ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true); - if (ret) - bus->sdiodev->bus_if->dstats.tx_errors++; - else - bus->sdiodev->bus_if->dstats.tx_bytes += datalen; /* In poll mode, need to check for other events */ if (!bus->intr && cnt) { @@ -1976,8 +1951,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) } /* Deflow-control stack if needed */ - if (bus->sdiodev->bus_if->drvr_up && - (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) && + if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) && bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { bus->txoff = false; brcmf_txflowblock(bus->sdiodev->dev, false); @@ -2724,9 +2698,10 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus, * address of sdpcm_shared structure */ sdio_claim_host(bus->sdiodev->func[1]); + brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); rv = brcmf_sdbrcm_membytes(bus, false, shaddr, (u8 *)&addr_le, 4); - sdio_claim_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func[1]); if (rv < 0) return rv; @@ -2745,10 +2720,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus, } /* Read hndrte_shared structure */ - sdio_claim_host(bus->sdiodev->func[1]); rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le, sizeof(struct sdpcm_shared_le)); - sdio_release_host(bus->sdiodev->func[1]); if (rv < 0) return rv; @@ -2850,14 +2823,12 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh, if ((sh->flags & SDPCM_SHARED_TRAP) == 0) return 0; - sdio_claim_host(bus->sdiodev->func[1]); error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr, sizeof(struct brcmf_trap_info)); if (error < 0) return error; nbytes = brcmf_sdio_dump_console(bus, sh, data, count); - sdio_release_host(bus->sdiodev->func[1]); if (nbytes < 0) return nbytes; @@ -3322,9 +3293,6 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus) { int ret; - if (bus->sdiodev->bus_if->drvr_up) - return -EISCONN; - ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME, &bus->sdiodev->func[2]->dev); if (ret) { @@ -3955,6 +3923,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) /* Assign bus interface call back */ bus->sdiodev->bus_if->dev = bus->sdiodev->dev; bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops; + bus->sdiodev->bus_if->chip = bus->ci->chip; + bus->sdiodev->bus_if->chiprev = 
bus->ci->chiprev; /* Attach to the brcmf/OS/network interface */ ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index ba0b22512f12..e9d6f91a1f2b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -189,24 +189,24 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, return; } - ifp = drvr->iflist[ifevent->ifidx]; + ifp = drvr->iflist[ifevent->bssidx]; if (ifevent->action == BRCMF_E_IF_ADD) { brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname, emsg->addr); - ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx, + ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx, emsg->ifname, emsg->addr); if (IS_ERR(ifp)) return; if (!drvr->fweh.evt_handler[BRCMF_E_IF]) - err = brcmf_net_attach(ifp); + err = brcmf_net_attach(ifp, false); } err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); if (ifevent->action == BRCMF_E_IF_DEL) - brcmf_del_if(drvr, ifevent->ifidx); + brcmf_del_if(drvr, ifevent->bssidx); } /** @@ -250,8 +250,6 @@ static void brcmf_fweh_event_worker(struct work_struct *work) drvr = container_of(fweh, struct brcmf_pub, fweh); while ((event = brcmf_fweh_dequeue_event(fweh))) { - ifp = drvr->iflist[event->ifidx]; - brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n", brcmf_fweh_event_name(event->code), event->code, event->emsg.ifidx, event->emsg.bsscfgidx, @@ -283,6 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work) goto event_free; } + ifp = drvr->iflist[emsg.bsscfgidx]; err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg, event->data); if (err) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index 36901f76a3b5..8c39b51dcccf 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h @@ -83,6 +83,7 @@ struct brcmf_event; BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \ BRCMF_ENUM_DEF(TRACE, 52) \ BRCMF_ENUM_DEF(IF, 54) \ + BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \ BRCMF_ENUM_DEF(RSSI, 56) \ BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \ BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \ @@ -96,8 +97,11 @@ struct brcmf_event; BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \ BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \ BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \ + BRCMF_ENUM_DEF(PROBERESP_MSG, 71) \ + BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \ BRCMF_ENUM_DEF(DCS_REQUEST, 73) \ - BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) + BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \ + BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) #define BRCMF_ENUM_DEF(id, val) \ BRCMF_E_##id = (val), diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c index d8d8b6549dc5..8d1def935b8d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c @@ -45,9 +45,10 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) if (data != NULL) len = min_t(uint, len, BRCMF_DCMD_MAXLEN); if (set) - err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len); + err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data, + len); else - err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data, + err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data, len); if (err >= 0) @@ -100,6 +101,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data) __le32 data_le = cpu_to_le32(data); 
mutex_lock(&ifp->drvr->proto_block); + brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data); err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true); mutex_unlock(&ifp->drvr->proto_block); @@ -116,6 +118,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data) err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false); mutex_unlock(&ifp->drvr->proto_block); *data = le32_to_cpu(data_le); + brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data); return err; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h new file mode 100644 index 000000000000..0f2c83bc95dc --- /dev/null +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#ifndef FWIL_TYPES_H_ +#define FWIL_TYPES_H_ + +#include <linux/if_ether.h> + + +#define BRCMF_FIL_ACTION_FRAME_SIZE 1800 + + +enum brcmf_fil_p2p_if_types { + BRCMF_FIL_P2P_IF_CLIENT, + BRCMF_FIL_P2P_IF_GO, + BRCMF_FIL_P2P_IF_DYNBCN_GO, + BRCMF_FIL_P2P_IF_DEV, +}; + +struct brcmf_fil_p2p_if_le { + u8 addr[ETH_ALEN]; + __le16 type; + __le16 chspec; +}; + +struct brcmf_fil_chan_info_le { + __le32 hw_channel; + __le32 target_channel; + __le32 scan_channel; +}; + +struct brcmf_fil_action_frame_le { + u8 da[ETH_ALEN]; + __le16 len; + __le32 packet_id; + u8 data[BRCMF_FIL_ACTION_FRAME_SIZE]; +}; + +struct brcmf_fil_af_params_le { + __le32 channel; + __le32 dwell_time; + u8 bssid[ETH_ALEN]; + u8 pad[2]; + struct brcmf_fil_action_frame_le action_frame; +}; + +struct brcmf_fil_bss_enable_le { + __le32 bsscfg_idx; + __le32 enable; +}; + +#endif /* FWIL_TYPES_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c new file mode 100644 index 000000000000..4166e642068b --- /dev/null +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -0,0 +1,2277 @@ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <net/cfg80211.h> + +#include <brcmu_wifi.h> +#include <brcmu_utils.h> +#include <defs.h> +#include <dhd.h> +#include <dhd_dbg.h> +#include "fwil.h" +#include "fwil_types.h" +#include "p2p.h" +#include "wl_cfg80211.h" + +/* parameters used for p2p escan */ +#define P2PAPI_SCAN_NPROBES 1 +#define P2PAPI_SCAN_DWELL_TIME_MS 80 +#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40 +#define P2PAPI_SCAN_HOME_TIME_MS 60 +#define P2PAPI_SCAN_NPROBS_TIME_MS 30 +#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100 +#define WL_SCAN_CONNECT_DWELL_TIME_MS 200 +#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20 + +#define BRCMF_P2P_WILDCARD_SSID "DIRECT-" +#define BRCMF_P2P_WILDCARD_SSID_LEN (sizeof(BRCMF_P2P_WILDCARD_SSID) - 1) + +#define SOCIAL_CHAN_1 1 +#define SOCIAL_CHAN_2 6 +#define SOCIAL_CHAN_3 11 +#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \ + (channel == SOCIAL_CHAN_2) || \ + (channel == SOCIAL_CHAN_3)) +#define SOCIAL_CHAN_CNT 3 +#define AF_PEER_SEARCH_CNT 2 + +#define BRCMF_SCB_TIMEOUT_VALUE 20 + +#define P2P_VER 9 /* P2P version: 9=WiFi P2P v1.0 */ +#define P2P_PUB_AF_CATEGORY 0x04 +#define P2P_PUB_AF_ACTION 0x09 +#define P2P_AF_CATEGORY 0x7f +#define P2P_OUI "\x50\x6F\x9A" /* P2P OUI */ +#define P2P_OUI_LEN 3 /* P2P OUI length */ + +/* Action Frame Constants */ +#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action */ +#define DOT11_ACTION_CAT_OFF 0 /* category offset */ +#define DOT11_ACTION_ACT_OFF 1 /* action offset */ + +#define P2P_AF_DWELL_TIME 200 +#define P2P_AF_MIN_DWELL_TIME 100 +#define P2P_AF_MED_DWELL_TIME 400 +#define P2P_AF_LONG_DWELL_TIME 1000 +#define P2P_AF_TX_MAX_RETRY 1 +#define P2P_AF_MAX_WAIT_TIME 2000 +#define P2P_INVALID_CHANNEL -1 +#define P2P_CHANNEL_SYNC_RETRY 5 +#define P2P_AF_FRM_SCAN_MAX_WAIT 1500 +#define P2P_DEFAULT_SLEEP_TIME_VSDB 200 + +/* WiFi P2P Public Action Frame OUI Subtypes */ +#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */ +#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */ +#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */ +#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */ +#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */ +#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */ +#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */ +#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */ +#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */ +#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */ + +/* WiFi P2P Action Frame OUI Subtypes */ +#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */ +#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */ +#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */ +#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */ + +/* P2P Service Discovery related */ +#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */ +#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */ +#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */ +#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comback Request AF */ +#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comback Response AF */ + +/** + * struct brcmf_p2p_disc_st_le - set discovery state in firmware. + * + * @state: requested discovery state (see enum brcmf_p2p_disc_state). + * @chspec: channel parameter for %WL_P2P_DISC_ST_LISTEN state. + * @dwell: dwell time in ms for %WL_P2P_DISC_ST_LISTEN state. 
+ */ +struct brcmf_p2p_disc_st_le { + u8 state; + __le16 chspec; + __le16 dwell; +}; + +/** + * enum brcmf_p2p_disc_state - P2P discovery state values + * + * @WL_P2P_DISC_ST_SCAN: P2P discovery with wildcard SSID and P2P IE. + * @WL_P2P_DISC_ST_LISTEN: P2P discovery off-channel for specified time. + * @WL_P2P_DISC_ST_SEARCH: P2P discovery with P2P wildcard SSID and P2P IE. + */ +enum brcmf_p2p_disc_state { + WL_P2P_DISC_ST_SCAN, + WL_P2P_DISC_ST_LISTEN, + WL_P2P_DISC_ST_SEARCH +}; + +/** + * struct brcmf_p2p_scan_le - P2P specific scan request. + * + * @type: type of scan method requested (values: 'E' or 'S'). + * @reserved: reserved (ignored). + * @eparams: parameters used for type 'E'. + * @sparams: parameters used for type 'S'. + */ +struct brcmf_p2p_scan_le { + u8 type; + u8 reserved[3]; + union { + struct brcmf_escan_params_le eparams; + struct brcmf_scan_params_le sparams; + }; +}; + +/** + * struct brcmf_p2p_pub_act_frame - WiFi P2P Public Action Frame + * + * @category: P2P_PUB_AF_CATEGORY + * @action: P2P_PUB_AF_ACTION + * @oui[3]: P2P_OUI + * @oui_type: OUI type - P2P_VER + * @subtype: OUI subtype - P2P_TYPE_* + * @dialog_token: nonzero, identifies req/rsp transaction + * @elts[1]: Variable length information elements. + */ +struct brcmf_p2p_pub_act_frame { + u8 category; + u8 action; + u8 oui[3]; + u8 oui_type; + u8 subtype; + u8 dialog_token; + u8 elts[1]; +}; + +/** + * struct brcmf_p2p_action_frame - WiFi P2P Action Frame + * + * @category: P2P_AF_CATEGORY + * @OUI[3]: OUI - P2P_OUI + * @type: OUI Type - P2P_VER + * @subtype: OUI Subtype - P2P_AF_* + * @dialog_token: nonzero, identifies req/resp tranaction + * @elts[1]: Variable length information elements. + */ +struct brcmf_p2p_action_frame { + u8 category; + u8 oui[3]; + u8 type; + u8 subtype; + u8 dialog_token; + u8 elts[1]; +}; + +/** + * struct brcmf_p2psd_gas_pub_act_frame - Wi-Fi GAS Public Action Frame + * + * @category: 0x04 Public Action Frame + * @action: 0x6c Advertisement Protocol + * @dialog_token: nonzero, identifies req/rsp transaction + * @query_data[1]: Query Data. SD gas ireq SD gas iresp + */ +struct brcmf_p2psd_gas_pub_act_frame { + u8 category; + u8 action; + u8 dialog_token; + u8 query_data[1]; +}; + +/** + * struct brcmf_config_af_params - Action Frame Parameters for tx. + * + * @mpc_onoff: To make sure to send successfully action frame, we have to + * turn off mpc 0: off, 1: on, (-1): do nothing + * @search_channel: 1: search peer's channel to send af + * extra_listen: keep the dwell time to get af response frame. + */ +struct brcmf_config_af_params { + s32 mpc_onoff; + bool search_channel; + bool extra_listen; +}; + +/** + * brcmf_p2p_is_pub_action() - true if p2p public type frame. + * + * @frame: action frame data. + * @frame_len: length of action frame data. + * + * Determine if action frame is p2p public action type + */ +static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len) +{ + struct brcmf_p2p_pub_act_frame *pact_frm; + + if (frame == NULL) + return false; + + pact_frm = (struct brcmf_p2p_pub_act_frame *)frame; + if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1) + return false; + + if (pact_frm->category == P2P_PUB_AF_CATEGORY && + pact_frm->action == P2P_PUB_AF_ACTION && + pact_frm->oui_type == P2P_VER && + memcmp(pact_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0) + return true; + + return false; +} + +/** + * brcmf_p2p_is_p2p_action() - true if p2p action type frame. + * + * @frame: action frame data. + * @frame_len: length of action frame data. 
+ * + * Determine if action frame is p2p action type + */ +static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len) +{ + struct brcmf_p2p_action_frame *act_frm; + + if (frame == NULL) + return false; + + act_frm = (struct brcmf_p2p_action_frame *)frame; + if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1) + return false; + + if (act_frm->category == P2P_AF_CATEGORY && + act_frm->type == P2P_VER && + memcmp(act_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0) + return true; + + return false; +} + +/** + * brcmf_p2p_is_gas_action() - true if p2p gas action type frame. + * + * @frame: action frame data. + * @frame_len: length of action frame data. + * + * Determine if action frame is p2p gas action type + */ +static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len) +{ + struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm; + + if (frame == NULL) + return false; + + sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame; + if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1) + return false; + + if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) + return false; + + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP) + return true; + + return false; +} + +/** + * brcmf_p2p_print_actframe() - debug print routine. + * + * @tx: Received or to be transmitted + * @frame: action frame data. + * @frame_len: length of action frame data. + * + * Print information about the p2p action frame + */ + +#ifdef DEBUG + +static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len) +{ + struct brcmf_p2p_pub_act_frame *pact_frm; + struct brcmf_p2p_action_frame *act_frm; + struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm; + + if (!frame || frame_len <= 2) + return; + + if (brcmf_p2p_is_pub_action(frame, frame_len)) { + pact_frm = (struct brcmf_p2p_pub_act_frame *)frame; + switch (pact_frm->subtype) { + case P2P_PAF_GON_REQ: + brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Req Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_GON_RSP: + brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Rsp Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_GON_CONF: + brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Confirm Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_INVITE_REQ: + brcmf_dbg(TRACE, "%s P2P Invitation Request Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_INVITE_RSP: + brcmf_dbg(TRACE, "%s P2P Invitation Response Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_DEVDIS_REQ: + brcmf_dbg(TRACE, "%s P2P Device Discoverability Request Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_DEVDIS_RSP: + brcmf_dbg(TRACE, "%s P2P Device Discoverability Response Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_PROVDIS_REQ: + brcmf_dbg(TRACE, "%s P2P Provision Discovery Request Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_PROVDIS_RSP: + brcmf_dbg(TRACE, "%s P2P Provision Discovery Response Frame\n", + (tx) ? "TX" : "RX"); + break; + default: + brcmf_dbg(TRACE, "%s Unknown P2P Public Action Frame\n", + (tx) ? "TX" : "RX"); + break; + } + } else if (brcmf_p2p_is_p2p_action(frame, frame_len)) { + act_frm = (struct brcmf_p2p_action_frame *)frame; + switch (act_frm->subtype) { + case P2P_AF_NOTICE_OF_ABSENCE: + brcmf_dbg(TRACE, "%s P2P Notice of Absence Frame\n", + (tx) ? 
"TX" : "RX"); + break; + case P2P_AF_PRESENCE_REQ: + brcmf_dbg(TRACE, "%s P2P Presence Request Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_AF_PRESENCE_RSP: + brcmf_dbg(TRACE, "%s P2P Presence Response Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_AF_GO_DISC_REQ: + brcmf_dbg(TRACE, "%s P2P Discoverability Request Frame\n", + (tx) ? "TX" : "RX"); + break; + default: + brcmf_dbg(TRACE, "%s Unknown P2P Action Frame\n", + (tx) ? "TX" : "RX"); + } + + } else if (brcmf_p2p_is_gas_action(frame, frame_len)) { + sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame; + switch (sd_act_frm->action) { + case P2PSD_ACTION_ID_GAS_IREQ: + brcmf_dbg(TRACE, "%s P2P GAS Initial Request\n", + (tx) ? "TX" : "RX"); + break; + case P2PSD_ACTION_ID_GAS_IRESP: + brcmf_dbg(TRACE, "%s P2P GAS Initial Response\n", + (tx) ? "TX" : "RX"); + break; + case P2PSD_ACTION_ID_GAS_CREQ: + brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n", + (tx) ? "TX" : "RX"); + break; + case P2PSD_ACTION_ID_GAS_CRESP: + brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n", + (tx) ? "TX" : "RX"); + break; + default: + brcmf_dbg(TRACE, "%s Unknown P2P GAS Frame\n", + (tx) ? "TX" : "RX"); + break; + } + } +} + +#else + +static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len) +{ +} + +#endif + + +/** + * brcmf_p2p_chnr_to_chspec() - convert channel number to chanspec. + * + * @channel: channel number + */ +static u16 brcmf_p2p_chnr_to_chspec(u16 channel) +{ + u16 chanspec; + + chanspec = channel & WL_CHANSPEC_CHAN_MASK; + + if (channel <= CH_MAX_2G_CHANNEL) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + return chanspec; +} + + +/** + * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation. + * + * @ifp: ifp to use for iovars (primary). + * @p2p_mac: mac address to configure for p2p_da_override + */ +static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) +{ + s32 ret = 0; + + brcmf_fil_iovar_int_set(ifp, "apsta", 1); + + /* In case of COB type, firmware has default mac address + * After Initializing firmware, we have to set current mac address to + * firmware for P2P device address + */ + ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac, + ETH_ALEN); + if (ret) + brcmf_err("failed to update device address ret %d\n", ret); + + return ret; +} + +/** + * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P. + * + * @p2p: P2P specific data. + * + * P2P needs mac addresses for P2P device and interface. These are + * derived from the primary net device, ie. the permanent ethernet + * address of the device. + */ +static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p) +{ + struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + struct brcmf_if *p2p_ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp; + + /* Generate the P2P Device Address. This consists of the device's + * primary MAC address with the locally administered bit set. + */ + memcpy(p2p->dev_addr, pri_ifp->mac_addr, ETH_ALEN); + p2p->dev_addr[0] |= 0x02; + memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN); + + /* Generate the P2P Interface Address. If the discovery and connection + * BSSCFGs need to simultaneously co-exist, then this address must be + * different from the P2P Device Address, but also locally administered. 
+ */ + memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN); + p2p->int_addr[4] ^= 0x80; +} + +/** + * brcmf_p2p_scan_is_p2p_request() - is cfg80211 scan request a P2P scan. + * + * @request: the scan request as received from cfg80211. + * + * returns true if one of the ssids in the request matches the + * P2P wildcard ssid; otherwise returns false. + */ +static bool brcmf_p2p_scan_is_p2p_request(struct cfg80211_scan_request *request) +{ + struct cfg80211_ssid *ssids = request->ssids; + int i; + + for (i = 0; i < request->n_ssids; i++) { + if (ssids[i].ssid_len != BRCMF_P2P_WILDCARD_SSID_LEN) + continue; + + brcmf_dbg(INFO, "comparing ssid \"%s\"", ssids[i].ssid); + if (!memcmp(BRCMF_P2P_WILDCARD_SSID, ssids[i].ssid, + BRCMF_P2P_WILDCARD_SSID_LEN)) + return true; + } + return false; +} + +/** + * brcmf_p2p_set_discover_state - set discover state in firmware. + * + * @ifp: low-level interface object. + * @state: discover state to set. + * @chanspec: channel parameters (for state @WL_P2P_DISC_ST_LISTEN only). + * @listen_ms: duration to listen (for state @WL_P2P_DISC_ST_LISTEN only). + */ +static s32 brcmf_p2p_set_discover_state(struct brcmf_if *ifp, u8 state, + u16 chanspec, u16 listen_ms) +{ + struct brcmf_p2p_disc_st_le discover_state; + s32 ret = 0; + brcmf_dbg(TRACE, "enter\n"); + + discover_state.state = state; + discover_state.chspec = cpu_to_le16(chanspec); + discover_state.dwell = cpu_to_le16(listen_ms); + ret = brcmf_fil_bsscfg_data_set(ifp, "p2p_state", &discover_state, + sizeof(discover_state)); + return ret; +} + +/** + * brcmf_p2p_deinit_discovery() - disable P2P device discovery. + * + * @p2p: P2P specific data. + * + * Resets the discovery state and disables it in firmware. + */ +static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p) +{ + struct brcmf_cfg80211_vif *vif; + + brcmf_dbg(TRACE, "enter\n"); + + /* Set the discovery state to SCAN */ + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + (void)brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0); + + /* Disable P2P discovery in the firmware */ + vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + (void)brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 0); + + return 0; +} + +/** + * brcmf_p2p_enable_discovery() - initialize and configure discovery. + * + * @p2p: P2P specific data. + * + * Initializes the discovery device and configure the virtual interface. + */ +static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p) +{ + struct brcmf_cfg80211_vif *vif; + s32 ret = 0; + + brcmf_dbg(TRACE, "enter\n"); + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + if (!vif) { + brcmf_err("P2P config device not available\n"); + ret = -EPERM; + goto exit; + } + + if (test_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status)) { + brcmf_dbg(INFO, "P2P config device already configured\n"); + goto exit; + } + + /* Re-initialize P2P Discovery in the firmware */ + vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1); + if (ret < 0) { + brcmf_err("set p2p_disc error\n"); + goto exit; + } + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0); + if (ret < 0) { + brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n"); + goto exit; + } + + /* + * Set wsec to any non-zero value in the discovery bsscfg + * to ensure our P2P probe responses have the privacy bit + * set in the 802.11 WPA IE. Some peer devices may not + * initiate WPS with us if this bit is not set. 
+ */ + ret = brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED); + if (ret < 0) { + brcmf_err("wsec error %d\n", ret); + goto exit; + } + + set_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status); +exit: + return ret; +} + +/** + * brcmf_p2p_escan() - initiate a P2P scan. + * + * @p2p: P2P specific data. + * @num_chans: number of channels to scan. + * @chanspecs: channel parameters for @num_chans channels. + * @search_state: P2P discover state to use. + * @action: scan action to pass to firmware. + * @bss_type: type of P2P bss. + */ +static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, + u16 chanspecs[], s32 search_state, u16 action, + enum p2p_bss_type bss_type) +{ + s32 ret = 0; + s32 memsize = offsetof(struct brcmf_p2p_scan_le, + eparams.params_le.channel_list); + s32 nprobes; + s32 active; + u32 i; + u8 *memblk; + struct brcmf_cfg80211_vif *vif; + struct brcmf_p2p_scan_le *p2p_params; + struct brcmf_scan_params_le *sparams; + struct brcmf_ssid ssid; + + memsize += num_chans * sizeof(__le16); + memblk = kzalloc(memsize, GFP_KERNEL); + if (!memblk) + return -ENOMEM; + + vif = p2p->bss_idx[bss_type].vif; + if (vif == NULL) { + brcmf_err("no vif for bss type %d\n", bss_type); + ret = -EINVAL; + goto exit; + } + + switch (search_state) { + case WL_P2P_DISC_ST_SEARCH: + /* + * If we in SEARCH STATE, we don't need to set SSID explictly + * because dongle use P2P WILDCARD internally by default + */ + /* use null ssid */ + ssid.SSID_len = 0; + memset(ssid.SSID, 0, sizeof(ssid.SSID)); + break; + case WL_P2P_DISC_ST_SCAN: + /* + * wpa_supplicant has p2p_find command with type social or + * progressive. For progressive, we need to set the ssid to + * P2P WILDCARD because we just do broadcast scan unless + * setting SSID. + */ + ssid.SSID_len = BRCMF_P2P_WILDCARD_SSID_LEN; + memcpy(ssid.SSID, BRCMF_P2P_WILDCARD_SSID, ssid.SSID_len); + break; + default: + brcmf_err(" invalid search state %d\n", search_state); + ret = -EINVAL; + goto exit; + } + + brcmf_p2p_set_discover_state(vif->ifp, search_state, 0, 0); + + /* + * set p2p scan parameters. + */ + p2p_params = (struct brcmf_p2p_scan_le *)memblk; + p2p_params->type = 'E'; + + /* determine the scan engine parameters */ + sparams = &p2p_params->eparams.params_le; + sparams->bss_type = DOT11_BSSTYPE_ANY; + if (p2p->cfg->active_scan) + sparams->scan_type = 0; + else + sparams->scan_type = 1; + + memset(&sparams->bssid, 0xFF, ETH_ALEN); + if (ssid.SSID_len) + memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len); + sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); + sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS); + + /* + * SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan + * supported by the supplicant. + */ + if (num_chans == SOCIAL_CHAN_CNT || num_chans == (SOCIAL_CHAN_CNT + 1)) + active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS; + else if (num_chans == AF_PEER_SEARCH_CNT) + active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS; + else if (wl_get_vif_state_all(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED)) + active = -1; + else + active = P2PAPI_SCAN_DWELL_TIME_MS; + + /* Override scan params to find a peer for a connection */ + if (num_chans == 1) { + active = WL_SCAN_CONNECT_DWELL_TIME_MS; + /* WAR to sync with presence period of VSDB GO. 
+ * send probe request more frequently + */ + nprobes = active / WL_SCAN_JOIN_PROBE_INTERVAL_MS; + } else { + nprobes = active / P2PAPI_SCAN_NPROBS_TIME_MS; + } + + if (nprobes <= 0) + nprobes = 1; + + brcmf_dbg(INFO, "nprobes # %d, active_time %d\n", nprobes, active); + sparams->active_time = cpu_to_le32(active); + sparams->nprobes = cpu_to_le32(nprobes); + sparams->passive_time = cpu_to_le32(-1); + sparams->channel_num = cpu_to_le32(num_chans & + BRCMF_SCAN_PARAMS_COUNT_MASK); + for (i = 0; i < num_chans; i++) + sparams->channel_list[i] = cpu_to_le16(chanspecs[i]); + + /* set the escan specific parameters */ + p2p_params->eparams.version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); + p2p_params->eparams.action = cpu_to_le16(action); + p2p_params->eparams.sync_id = cpu_to_le16(0x1234); + /* perform p2p scan on primary device */ + ret = brcmf_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize); + if (!ret) + set_bit(BRCMF_SCAN_STATUS_BUSY, &p2p->cfg->scan_status); +exit: + kfree(memblk); + return ret; +} + +/** + * brcmf_p2p_run_escan() - escan callback for peer-to-peer. + * + * @cfg: driver private data for cfg80211 interface. + * @ndev: net device for which scan is requested. + * @request: scan request from cfg80211. + * @action: scan action. + * + * Determines the P2P discovery state based to scan request parameters and + * validates the channels in the request. + */ +static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, + struct net_device *ndev, + struct cfg80211_scan_request *request, + u16 action) +{ + struct brcmf_p2p_info *p2p = &cfg->p2p; + s32 err = 0; + s32 search_state = WL_P2P_DISC_ST_SCAN; + struct brcmf_cfg80211_vif *vif; + struct net_device *dev = NULL; + int i, num_nodfs = 0; + u16 *chanspecs; + + brcmf_dbg(TRACE, "enter\n"); + + if (!request) { + err = -EINVAL; + goto exit; + } + + if (request->n_channels) { + chanspecs = kcalloc(request->n_channels, sizeof(*chanspecs), + GFP_KERNEL); + if (!chanspecs) { + err = -ENOMEM; + goto exit; + } + vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; + if (vif) + dev = vif->wdev.netdev; + if (request->n_channels == 3 && + request->channels[0]->hw_value == SOCIAL_CHAN_1 && + request->channels[1]->hw_value == SOCIAL_CHAN_2 && + request->channels[2]->hw_value == SOCIAL_CHAN_3) { + /* SOCIAL CHANNELS 1, 6, 11 */ + search_state = WL_P2P_DISC_ST_SEARCH; + brcmf_dbg(INFO, "P2P SEARCH PHASE START\n"); + } else if (dev != NULL && vif->mode == WL_MODE_AP) { + /* If you are already a GO, then do SEARCH only */ + brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n"); + search_state = WL_P2P_DISC_ST_SEARCH; + } else { + brcmf_dbg(INFO, "P2P SCAN STATE START\n"); + } + + /* + * no P2P scanning on passive or DFS channels. + */ + for (i = 0; i < request->n_channels; i++) { + struct ieee80211_channel *chan = request->channels[i]; + + if (chan->flags & (IEEE80211_CHAN_RADAR | + IEEE80211_CHAN_PASSIVE_SCAN)) + continue; + + chanspecs[i] = channel_to_chanspec(chan); + brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n", + num_nodfs, chan->hw_value, chanspecs[i]); + num_nodfs++; + } + err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state, + action, P2PAPI_BSSCFG_DEVICE); + } +exit: + if (err) + brcmf_err("error (%d)\n", err); + return err; +} + + +/** + * brcmf_p2p_find_listen_channel() - find listen channel in ie string. + * + * @ie: string of information elements. + * @ie_len: length of string. + * + * Scan ie for p2p ie and look for attribute 6 channel. If available determine + * channel and return it. 
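The function that follows extracts the device's own listen channel from the P2P IE carried in the scan request. As a standalone illustration (not part of the patch), here is a minimal sketch of decoding the 5-byte Listen Channel attribute body, whose layout the code below documents as 3 bytes of country string, 1 byte operating class and 1 byte channel number; the sample values are made up, and only the social channels 1, 6 and 11 are accepted, mirroring the SOCIAL_CHAN_* checks:

/* Standalone sketch: decode a P2P Listen Channel attribute body and accept
 * only the social channels 1, 6 and 11. Layout taken from the comment in
 * the function below: 3(country) + 1(op. class) + 1(chan num).
 */
#include <stdio.h>

static int listen_channel_from_attr(const unsigned char attr[5])
{
    int chan = attr[3 + 1];     /* skip country(3) + op. class(1) */

    if (chan == 1 || chan == 6 || chan == 11)
        return chan;
    return -1;                  /* not a social channel */
}

int main(void)
{
    /* "US", third country byte, operating class 81, channel 6 - example only */
    const unsigned char attr[5] = { 'U', 'S', 0x04, 81, 6 };

    printf("listen channel: %d\n", listen_channel_from_attr(attr));
    return 0;
}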
+ */ +static s32 brcmf_p2p_find_listen_channel(const u8 *ie, u32 ie_len) +{ + u8 channel_ie[5]; + s32 listen_channel; + s32 err; + + err = cfg80211_get_p2p_attr(ie, ie_len, + IEEE80211_P2P_ATTR_LISTEN_CHANNEL, + channel_ie, sizeof(channel_ie)); + if (err < 0) + return err; + + /* listen channel subel length format: */ + /* 3(country) + 1(op. class) + 1(chan num) */ + listen_channel = (s32)channel_ie[3 + 1]; + + if (listen_channel == SOCIAL_CHAN_1 || + listen_channel == SOCIAL_CHAN_2 || + listen_channel == SOCIAL_CHAN_3) { + brcmf_dbg(INFO, "Found my Listen Channel %d\n", listen_channel); + return listen_channel; + } + + return -EPERM; +} + + +/** + * brcmf_p2p_scan_prep() - prepare scan based on request. + * + * @wiphy: wiphy device. + * @request: scan request from cfg80211. + * @vif: vif on which scan request is to be executed. + * + * Prepare the scan appropriately for type of scan requested. Overrides the + * escan .run() callback for peer-to-peer scanning. + */ +int brcmf_p2p_scan_prep(struct wiphy *wiphy, + struct cfg80211_scan_request *request, + struct brcmf_cfg80211_vif *vif) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_p2p_info *p2p = &cfg->p2p; + int err = 0; + + if (brcmf_p2p_scan_is_p2p_request(request)) { + /* find my listen channel */ + err = brcmf_p2p_find_listen_channel(request->ie, + request->ie_len); + if (err < 0) + return err; + + p2p->afx_hdl.my_listen_chan = err; + + clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n"); + + err = brcmf_p2p_enable_discovery(p2p); + if (err) + return err; + + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + + /* override .run_escan() callback. */ + cfg->escan_info.run = brcmf_p2p_run_escan; + } + err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG, + request->ie, request->ie_len); + return err; +} + + +/** + * brcmf_p2p_discover_listen() - set firmware to discover listen state. + * + * @p2p: p2p device. + * @channel: channel nr for discover listen. + * @duration: time in ms to stay on channel. + * + */ +static s32 +brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration) +{ + struct brcmf_cfg80211_vif *vif; + s32 err = 0; + u16 chanspec; + + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + if (!vif) { + brcmf_err("Discovery is not set, so we have nothing to do\n"); + err = -EPERM; + goto exit; + } + + if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) { + brcmf_err("Previous LISTEN is not completed yet\n"); + /* WAR: prevent cookie mismatch in wpa_supplicant return OK */ + goto exit; + } + + chanspec = brcmf_p2p_chnr_to_chspec(channel); + err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN, + chanspec, (u16)duration); + if (!err) { + set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status); + p2p->remain_on_channel_cookie++; + } +exit: + return err; +} + + +/** + * brcmf_p2p_remain_on_channel() - put device on channel and stay there. + * + * @wiphy: wiphy device. + * @channel: channel to stay on. + * @duration: time in ms to remain on channel. 
+ * + */ +int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + struct ieee80211_channel *channel, + unsigned int duration, u64 *cookie) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_p2p_info *p2p = &cfg->p2p; + s32 err; + u16 channel_nr; + + channel_nr = ieee80211_frequency_to_channel(channel->center_freq); + brcmf_dbg(TRACE, "Enter, channel: %d, duration ms (%d)\n", channel_nr, + duration); + + err = brcmf_p2p_enable_discovery(p2p); + if (err) + goto exit; + err = brcmf_p2p_discover_listen(p2p, channel_nr, duration); + if (err) + goto exit; + + memcpy(&p2p->remain_on_channel, channel, sizeof(*channel)); + *cookie = p2p->remain_on_channel_cookie; + cfg80211_ready_on_channel(wdev, *cookie, channel, duration, GFP_KERNEL); + +exit: + return err; +} + + +/** + * brcmf_p2p_notify_listen_complete() - p2p listen has completed. + * + * @ifp: interfac control. + * @e: event message. Not used, to make it usable for fweh event dispatcher. + * @data: payload of message. Not used. + * + */ +int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data) +{ + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_p2p_info *p2p = &cfg->p2p; + + brcmf_dbg(TRACE, "Enter\n"); + if (test_and_clear_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, + &p2p->status)) { + if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN, + &p2p->status)) { + clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, + &p2p->status); + brcmf_dbg(INFO, "Listen DONE, wake up wait_next_af\n"); + complete(&p2p->wait_next_af); + } + + cfg80211_remain_on_channel_expired(&ifp->vif->wdev, + p2p->remain_on_channel_cookie, + &p2p->remain_on_channel, + GFP_KERNEL); + } + return 0; +} + + +/** + * brcmf_p2p_cancel_remain_on_channel() - cancel p2p listen state. + * + * @ifp: interfac control. + * + */ +void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp) +{ + if (!ifp) + return; + brcmf_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0); + brcmf_p2p_notify_listen_complete(ifp, NULL, NULL); +} + + +/** + * brcmf_p2p_act_frm_search() - search function for action frame. + * + * @p2p: p2p device. + * channel: channel on which action frame is to be trasmitted. + * + * search function to reach at common channel to send action frame. When + * channel is 0 then all social channels will be used to send af + */ +static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel) +{ + s32 err; + u32 channel_cnt; + u16 *default_chan_list; + u32 i; + + brcmf_dbg(TRACE, "Enter\n"); + + if (channel) + channel_cnt = AF_PEER_SEARCH_CNT; + else + channel_cnt = SOCIAL_CHAN_CNT; + default_chan_list = kzalloc(channel_cnt * sizeof(*default_chan_list), + GFP_KERNEL); + if (default_chan_list == NULL) { + brcmf_err("channel list allocation failed\n"); + err = -ENOMEM; + goto exit; + } + if (channel) { + /* insert same channel to the chan_list */ + for (i = 0; i < channel_cnt; i++) + default_chan_list[i] = + brcmf_p2p_chnr_to_chspec(channel); + } else { + default_chan_list[0] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_1); + default_chan_list[1] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_2); + default_chan_list[2] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_3); + } + err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list, + WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START, + P2PAPI_BSSCFG_DEVICE); + kfree(default_chan_list); +exit: + return err; +} + + +/** + * brcmf_p2p_afx_handler() - afx worker thread. 
+ * + * @work: + * + */ +static void brcmf_p2p_afx_handler(struct work_struct *work) +{ + struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work); + struct brcmf_p2p_info *p2p = container_of(afx_hdl, + struct brcmf_p2p_info, + afx_hdl); + s32 err; + + if (!afx_hdl->is_active) + return; + + if (afx_hdl->is_listen && afx_hdl->my_listen_chan) + /* 100ms ~ 300ms */ + err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan, + 100 * (1 + (random32() % 3))); + else + err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan); + + if (err) { + brcmf_err("ERROR occurred! value is (%d)\n", err); + if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status)) + complete(&afx_hdl->act_frm_scan); + } +} + + +/** + * brcmf_p2p_af_searching_channel() - search channel. + * + * @p2p: p2p device info struct. + * + */ +static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) +{ + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + struct brcmf_cfg80211_vif *pri_vif; + unsigned long duration; + s32 retry; + + brcmf_dbg(TRACE, "Enter\n"); + + pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + + INIT_COMPLETION(afx_hdl->act_frm_scan); + set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status); + afx_hdl->is_active = true; + afx_hdl->peer_chan = P2P_INVALID_CHANNEL; + + /* Loop to wait until we find a peer's channel or the + * pending action frame tx is cancelled. + */ + retry = 0; + duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT); + while ((retry < P2P_CHANNEL_SYNC_RETRY) && + (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) { + afx_hdl->is_listen = false; + brcmf_dbg(TRACE, "Scheduling action frame for sending.. (%d)\n", + retry); + /* search peer on peer's listen channel */ + schedule_work(&afx_hdl->afx_work); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration); + if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || + (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status))) + break; + + if (afx_hdl->my_listen_chan) { + brcmf_dbg(TRACE, "Scheduling listen peer, channel=%d\n", + afx_hdl->my_listen_chan); + /* listen on my listen channel */ + afx_hdl->is_listen = true; + schedule_work(&afx_hdl->afx_work); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, + duration); + } + if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || + (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status))) + break; + retry++; + + /* if sta is connected or connecting, sleep for a while before + * retry af tx or finding a peer + */ + if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &pri_vif->sme_state) || + test_bit(BRCMF_VIF_STATUS_CONNECTING, &pri_vif->sme_state)) + msleep(P2P_DEFAULT_SLEEP_TIME_VSDB); + } + + brcmf_dbg(TRACE, "Completed search/listen peer_chan=%d\n", + afx_hdl->peer_chan); + afx_hdl->is_active = false; + + clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status); + + return afx_hdl->peer_chan; +} + + +/** + * brcmf_p2p_scan_finding_common_channel() - was escan used for finding channel + * + * @cfg: common configuration struct. + * @bi: bss info struct, result from scan. 
+ * + */ +bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, + struct brcmf_bss_info_le *bi) + +{ + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + u8 *ie; + s32 err; + u8 p2p_dev_addr[ETH_ALEN]; + + if (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status)) + return false; + + if (bi == NULL) { + brcmf_dbg(TRACE, "ACTION FRAME SCAN Done\n"); + if (afx_hdl->peer_chan == P2P_INVALID_CHANNEL) + complete(&afx_hdl->act_frm_scan); + return true; + } + + ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset); + memset(p2p_dev_addr, 0, sizeof(p2p_dev_addr)); + err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length), + IEEE80211_P2P_ATTR_DEVICE_INFO, + p2p_dev_addr, sizeof(p2p_dev_addr)); + if (err < 0) + err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length), + IEEE80211_P2P_ATTR_DEVICE_ID, + p2p_dev_addr, sizeof(p2p_dev_addr)); + if ((err >= 0) && + (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) { + afx_hdl->peer_chan = bi->ctl_ch ? bi->ctl_ch : + CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); + brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n", + afx_hdl->tx_dst_addr, afx_hdl->peer_chan); + complete(&afx_hdl->act_frm_scan); + } + return true; +} + +/** + * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete. + * + * @cfg: common configuration struct. + * + */ +static void +brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg) +{ + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct net_device *ndev = cfg->escan_info.ndev; + + if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) && + (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) || + test_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status))) { + brcmf_dbg(TRACE, "*** Wake UP ** abort actframe iovar\n"); + /* if channel is not zero, "actfame" uses off channel scan. + * So abort scan for off channel completion. + */ + if (p2p->af_sent_channel) + brcmf_notify_escan_complete(cfg, ndev, true, true); + } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN, + &p2p->status)) { + brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n"); + /* So abort scan to cancel listen */ + brcmf_notify_escan_complete(cfg, ndev, true, true); + } +} + + +/** + * brcmf_p2p_gon_req_collision() - Check if go negotiaton collission + * + * @p2p: p2p device info struct. + * + * return true if recevied action frame is to be dropped. + */ +static bool +brcmf_p2p_gon_req_collision(struct brcmf_p2p_info *p2p, u8 *mac) +{ + struct brcmf_cfg80211_info *cfg = p2p->cfg; + struct brcmf_if *ifp; + + brcmf_dbg(TRACE, "Enter\n"); + + if (!test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) || + !p2p->gon_req_action) + return false; + + brcmf_dbg(TRACE, "GO Negotiation Request COLLISION !!!\n"); + /* if sa(peer) addr is less than da(my) addr, then this device + * process peer's gon request and block to send gon req. + * if not (sa addr > da addr), + * this device will process gon request and drop gon req of peer. 
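The comment above spells out the tie-break used when both peers fire GO negotiation requests at once: the side whose MAC address compares lower byte-wise blocks its own request and services the peer's, while the higher side keeps transmitting and drops the peer's frame. A standalone sketch of that comparison (the addresses are made up):

/* Standalone sketch: byte-wise MAC comparison used as the GO-negotiation
 * tie-break described in the comment above.
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
    unsigned char peer_sa[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
    unsigned char my_da[ETH_ALEN]   = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x66 };

    if (memcmp(peer_sa, my_da, ETH_ALEN) < 0)
        printf("peer is lower: block our GO-neg request, answer theirs\n");
    else
        printf("our address is lower or equal: drop the peer's request, keep ours\n");

    return 0;
}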
+ */ + ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp; + if (memcmp(mac, ifp->mac_addr, ETH_ALEN) < 0) { + brcmf_dbg(INFO, "Block transmit gon req !!!\n"); + p2p->block_gon_req_tx = true; + /* if we are finding a common channel for sending af, + * do not scan more to block to send current gon req + */ + if (test_and_clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status)) + complete(&p2p->afx_hdl.act_frm_scan); + if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, + &p2p->status)) + brcmf_p2p_stop_wait_next_action_frame(cfg); + return false; + } + + /* drop gon request of peer to process gon request by this device. */ + brcmf_dbg(INFO, "Drop received gon req !!!\n"); + + return true; +} + + +/** + * brcmf_p2p_notify_action_frame_rx() - received action frame. + * + * @ifp: interfac control. + * @e: event message. Not used, to make it usable for fweh event dispatcher. + * @data: payload of message, containing action frame data. + * + */ +int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data) +{ + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + struct wireless_dev *wdev; + u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data); + struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data; + u8 *frame = (u8 *)(rxframe + 1); + struct brcmf_p2p_pub_act_frame *act_frm; + struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm; + u16 chanspec = be16_to_cpu(rxframe->chanspec); + struct ieee80211_mgmt *mgmt_frame; + s32 freq; + u16 mgmt_type; + u8 action; + + /* Check if wpa_supplicant has registered for this frame */ + brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg); + mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4; + if ((ifp->vif->mgmt_rx_reg & BIT(mgmt_type)) == 0) + return 0; + + brcmf_p2p_print_actframe(false, frame, mgmt_frame_len); + + action = P2P_PAF_SUBTYPE_INVALID; + if (brcmf_p2p_is_pub_action(frame, mgmt_frame_len)) { + act_frm = (struct brcmf_p2p_pub_act_frame *)frame; + action = act_frm->subtype; + if ((action == P2P_PAF_GON_REQ) && + (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) { + if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status) && + (memcmp(afx_hdl->tx_dst_addr, e->addr, + ETH_ALEN) == 0)) { + afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec); + brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n", + afx_hdl->peer_chan); + complete(&afx_hdl->act_frm_scan); + } + return 0; + } + /* After complete GO Negotiation, roll back to mpc mode */ + if ((action == P2P_PAF_GON_CONF) || + (action == P2P_PAF_PROVDIS_RSP)) + brcmf_set_mpc(ifp->ndev, 1); + if (action == P2P_PAF_GON_CONF) { + brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n"); + clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + } + } else if (brcmf_p2p_is_gas_action(frame, mgmt_frame_len)) { + sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame; + action = sd_act_frm->action; + } + + if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) && + (p2p->next_af_subtype == action)) { + brcmf_dbg(TRACE, "We got a right next frame! (%d)\n", action); + clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, + &p2p->status); + /* Stop waiting for next AF. 
*/ + brcmf_p2p_stop_wait_next_action_frame(cfg); + } + + mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) + + mgmt_frame_len, GFP_KERNEL); + if (!mgmt_frame) { + brcmf_err("No memory available for action frame\n"); + return -ENOMEM; + } + memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN); + brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid, + ETH_ALEN); + memcpy(mgmt_frame->sa, e->addr, ETH_ALEN); + mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION); + memcpy(&mgmt_frame->u, frame, mgmt_frame_len); + mgmt_frame_len += offsetof(struct ieee80211_mgmt, u); + + freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec), + CHSPEC_IS2G(chanspec) ? + IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ); + wdev = ifp->ndev->ieee80211_ptr; + cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, + GFP_ATOMIC); + + kfree(mgmt_frame); + return 0; +} + + +/** + * brcmf_p2p_notify_action_tx_complete() - transmit action frame complete + * + * @ifp: interfac control. + * @e: event message. Not used, to make it usable for fweh event dispatcher. + * @data: not used. + * + */ +int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data) +{ + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_p2p_info *p2p = &cfg->p2p; + + brcmf_dbg(INFO, "Enter: event %s, status=%d\n", + e->event_code == BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ? + "ACTION_FRAME_OFF_CHAN_COMPLETE" : "ACTION_FRAME_COMPLETE", + e->status); + + if (!test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status)) + return 0; + + if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) { + if (e->status == BRCMF_E_STATUS_SUCCESS) + set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, + &p2p->status); + else { + set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); + /* If there is no ack, we don't need to wait for + * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event + */ + brcmf_p2p_stop_wait_next_action_frame(cfg); + } + + } else { + complete(&p2p->send_af_done); + } + return 0; +} + + +/** + * brcmf_p2p_tx_action_frame() - send action frame over fil. + * + * @p2p: p2p info struct for vif. + * @af_params: action frame data/info. + * + * Send an action frame immediately without doing channel synchronization. + * + * This function waits for a completion event before returning. + * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action + * frame is transmitted. 
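The transmit routine below pushes the frame to the firmware through the "actframe" iovar and then blocks on a completion until either the completion event arrives or P2P_AF_MAX_WAIT_TIME expires. As a rough user-space analogy only (the kernel code uses a struct completion, not pthreads), the same send-then-wait-with-timeout pattern looks like this:

/* Standalone sketch (user-space analogy, not kernel code): a condition
 * variable stands in for the completion that the firmware event handler
 * signals when the action frame has gone out.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static bool tx_done;

/* Stand-in for the event path that reports frame completion. */
static void *fake_event_handler(void *arg)
{
    (void)arg;
    struct timespec delay = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
    nanosleep(&delay, NULL);

    pthread_mutex_lock(&lock);
    tx_done = true;
    pthread_cond_signal(&done_cv);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t thr;
    struct timespec deadline;
    int err = 0;

    pthread_create(&thr, NULL, fake_event_handler, NULL);

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 2;       /* stand-in for P2P_AF_MAX_WAIT_TIME */

    pthread_mutex_lock(&lock);
    while (!tx_done && err == 0)
        err = pthread_cond_timedwait(&done_cv, &lock, &deadline);
    pthread_mutex_unlock(&lock);

    printf(tx_done ? "action frame completed\n" : "timed out waiting for completion\n");
    pthread_join(thr, NULL);
    return 0;
}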
+ */ +static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, + struct brcmf_fil_af_params_le *af_params) +{ + struct brcmf_cfg80211_vif *vif; + s32 err = 0; + s32 timeout = 0; + + brcmf_dbg(TRACE, "Enter\n"); + + INIT_COMPLETION(p2p->send_af_done); + clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); + clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); + + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params, + sizeof(*af_params)); + if (err) { + brcmf_err(" sending action frame has failed\n"); + goto exit; + } + + p2p->af_sent_channel = le32_to_cpu(af_params->channel); + p2p->af_tx_sent_jiffies = jiffies; + + timeout = wait_for_completion_timeout(&p2p->send_af_done, + msecs_to_jiffies(P2P_AF_MAX_WAIT_TIME)); + + if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) { + brcmf_dbg(TRACE, "TX action frame operation is success\n"); + } else { + err = -EIO; + brcmf_dbg(TRACE, "TX action frame operation has failed\n"); + } + /* clear status bit for action tx */ + clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); + clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); + +exit: + return err; +} + + +/** + * brcmf_p2p_pub_af_tx() - public action frame tx routine. + * + * @cfg: driver private data for cfg80211 interface. + * @af_params: action frame data/info. + * @config_af_params: configuration data for action frame. + * + * routine which transmits ation frame public type. + */ +static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg, + struct brcmf_fil_af_params_le *af_params, + struct brcmf_config_af_params *config_af_params) +{ + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_fil_action_frame_le *action_frame; + struct brcmf_p2p_pub_act_frame *act_frm; + s32 err = 0; + u16 ie_len; + + action_frame = &af_params->action_frame; + act_frm = (struct brcmf_p2p_pub_act_frame *)(action_frame->data); + + config_af_params->extra_listen = true; + + switch (act_frm->subtype) { + case P2P_PAF_GON_REQ: + brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status set\n"); + set_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + config_af_params->mpc_onoff = 0; + config_af_params->search_channel = true; + p2p->next_af_subtype = act_frm->subtype + 1; + p2p->gon_req_action = true; + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_GON_RSP: + p2p->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for CONF frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_GON_CONF: + /* If we reached till GO Neg confirmation reset the filter */ + brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n"); + clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + /* turn on mpc again if go nego is done */ + config_af_params->mpc_onoff = 1; + /* minimize dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + config_af_params->extra_listen = false; + break; + case P2P_PAF_INVITE_REQ: + config_af_params->search_channel = true; + p2p->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_INVITE_RSP: + /* minimize dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + config_af_params->extra_listen = false; + break; + case P2P_PAF_DEVDIS_REQ: + config_af_params->search_channel = true; + p2p->next_af_subtype = act_frm->subtype 
+ 1; + /* maximize dwell time to wait for RESP frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_LONG_DWELL_TIME); + break; + case P2P_PAF_DEVDIS_RSP: + /* minimize dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + config_af_params->extra_listen = false; + break; + case P2P_PAF_PROVDIS_REQ: + ie_len = le16_to_cpu(action_frame->len) - + offsetof(struct brcmf_p2p_pub_act_frame, elts); + if (cfg80211_get_p2p_attr(&act_frm->elts[0], ie_len, + IEEE80211_P2P_ATTR_GROUP_ID, + NULL, 0) < 0) + config_af_params->search_channel = true; + config_af_params->mpc_onoff = 0; + p2p->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_PROVDIS_RSP: + /* wpa_supplicant send go nego req right after prov disc */ + p2p->next_af_subtype = P2P_PAF_GON_REQ; + /* increase dwell time to MED level */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + config_af_params->extra_listen = false; + break; + default: + brcmf_err("Unknown p2p pub act frame subtype: %d\n", + act_frm->subtype); + err = -EINVAL; + } + return err; +} + +/** + * brcmf_p2p_send_action_frame() - send action frame . + * + * @cfg: driver private data for cfg80211 interface. + * @ndev: net device to transmit on. + * @af_params: configuration data for action frame. + */ +bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, + struct net_device *ndev, + struct brcmf_fil_af_params_le *af_params) +{ + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_fil_action_frame_le *action_frame; + struct brcmf_config_af_params config_af_params; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + u16 action_frame_len; + bool ack = false; + u8 category; + u8 action; + s32 tx_retry; + s32 extra_listen_time; + uint delta_ms; + + action_frame = &af_params->action_frame; + action_frame_len = le16_to_cpu(action_frame->len); + + brcmf_p2p_print_actframe(true, action_frame->data, action_frame_len); + + /* Add the default dwell time. Dwell time to stay off-channel */ + /* to wait for a response action frame after transmitting an */ + /* GO Negotiation action frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_DWELL_TIME); + + category = action_frame->data[DOT11_ACTION_CAT_OFF]; + action = action_frame->data[DOT11_ACTION_ACT_OFF]; + + /* initialize variables */ + p2p->next_af_subtype = P2P_PAF_SUBTYPE_INVALID; + p2p->gon_req_action = false; + + /* config parameters */ + config_af_params.mpc_onoff = -1; + config_af_params.search_channel = false; + config_af_params.extra_listen = false; + + if (brcmf_p2p_is_pub_action(action_frame->data, action_frame_len)) { + /* p2p public action frame process */ + if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) { + /* Just send unknown subtype frame with */ + /* default parameters. 
*/ + brcmf_err("P2P Public action frame, unknown subtype.\n"); + } + } else if (brcmf_p2p_is_gas_action(action_frame->data, + action_frame_len)) { + /* service discovery process */ + if (action == P2PSD_ACTION_ID_GAS_IREQ || + action == P2PSD_ACTION_ID_GAS_CREQ) { + /* configure service discovery query frame */ + config_af_params.search_channel = true; + + /* save next af suptype to cancel */ + /* remaining dwell time */ + p2p->next_af_subtype = action + 1; + + af_params->dwell_time = + cpu_to_le32(P2P_AF_MED_DWELL_TIME); + } else if (action == P2PSD_ACTION_ID_GAS_IRESP || + action == P2PSD_ACTION_ID_GAS_CRESP) { + /* configure service discovery response frame */ + af_params->dwell_time = + cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + } else { + brcmf_err("Unknown action type: %d\n", action); + goto exit; + } + } else if (brcmf_p2p_is_p2p_action(action_frame->data, + action_frame_len)) { + /* do not configure anything. it will be */ + /* sent with a default configuration */ + } else { + brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n", + category, action); + return false; + } + + /* if connecting on primary iface, sleep for a while before sending + * af tx for VSDB + */ + if (test_bit(BRCMF_VIF_STATUS_CONNECTING, + &p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->sme_state)) + msleep(50); + + /* if scan is ongoing, abort current scan. */ + if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) + brcmf_abort_scanning(cfg); + + memcpy(afx_hdl->tx_dst_addr, action_frame->da, ETH_ALEN); + + /* To make sure to send successfully action frame, turn off mpc */ + if (config_af_params.mpc_onoff == 0) + brcmf_set_mpc(ndev, 0); + + /* set status and destination address before sending af */ + if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) { + /* set status to cancel the remained dwell time in rx process */ + set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status); + } + + p2p->af_sent_channel = 0; + set_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status); + /* validate channel and p2p ies */ + if (config_af_params.search_channel && + IS_P2P_SOCIAL_CHANNEL(le32_to_cpu(af_params->channel)) && + p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->saved_ie.probe_req_ie_len) { + afx_hdl = &p2p->afx_hdl; + afx_hdl->peer_listen_chan = le32_to_cpu(af_params->channel); + + if (brcmf_p2p_af_searching_channel(p2p) == + P2P_INVALID_CHANNEL) { + brcmf_err("Couldn't find peer's channel.\n"); + goto exit; + } + + /* Abort scan even for VSDB scenarios. Scan gets aborted in + * firmware but after the check of piggyback algorithm. To take + * care of current piggback algo, lets abort the scan here + * itself. + */ + brcmf_notify_escan_complete(cfg, ndev, true, true); + + /* update channel */ + af_params->channel = cpu_to_le32(afx_hdl->peer_chan); + } + + tx_retry = 0; + while (!p2p->block_gon_req_tx && + (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) { + ack = !brcmf_p2p_tx_action_frame(p2p, af_params); + tx_retry++; + } + if (ack == false) { + brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry); + clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + } + +exit: + clear_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status); + + /* WAR: sometimes dongle does not keep the dwell time of 'actframe'. + * if we coundn't get the next action response frame and dongle does + * not keep the dwell time, go to listen state again to get next action + * response frame. 
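The hunk that follows works around dongles that cut the requested "actframe" dwell time short: it computes how much of the dwell remains and, if more than 50 ms is left, re-enters listen state for the remainder plus a margin. A minimal sketch of that arithmetic with example numbers:

/* Standalone sketch: the extra-listen arithmetic used in the next hunk.
 * The real code takes dwell_time from the action-frame parameters and the
 * elapsed time from jiffies; the numbers here are examples.
 */
#include <stdio.h>

int main(void)
{
    unsigned int dwell_ms = 200;    /* requested off-channel dwell */
    unsigned int elapsed_ms = 120;  /* time since the frame was sent */
    unsigned int extra_ms = dwell_ms > elapsed_ms ? dwell_ms - elapsed_ms : 0;

    if (extra_ms > 50) {
        extra_ms += 100;            /* margin added before listening again */
        printf("re-enter listen for %u ms\n", extra_ms);
    } else {
        printf("dwell nearly over, no extra listen\n");
    }
    return 0;
}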
+ */ + if (ack && config_af_params.extra_listen && !p2p->block_gon_req_tx && + test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) && + p2p->af_sent_channel == afx_hdl->my_listen_chan) { + delta_ms = jiffies_to_msecs(jiffies - p2p->af_tx_sent_jiffies); + if (le32_to_cpu(af_params->dwell_time) > delta_ms) + extra_listen_time = le32_to_cpu(af_params->dwell_time) - + delta_ms; + else + extra_listen_time = 0; + if (extra_listen_time > 50) { + set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN, + &p2p->status); + brcmf_dbg(INFO, "Wait more time! actual af time:%d, calculated extra listen:%d\n", + le32_to_cpu(af_params->dwell_time), + extra_listen_time); + extra_listen_time += 100; + if (!brcmf_p2p_discover_listen(p2p, + p2p->af_sent_channel, + extra_listen_time)) { + unsigned long duration; + + extra_listen_time += 100; + duration = msecs_to_jiffies(extra_listen_time); + wait_for_completion_timeout(&p2p->wait_next_af, + duration); + } + clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN, + &p2p->status); + } + } + + if (p2p->block_gon_req_tx) { + /* if ack is true, supplicant will wait more time(100ms). + * so we will return it as a success to get more time . + */ + p2p->block_gon_req_tx = false; + ack = true; + } + + clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status); + /* if all done, turn mpc on again */ + if (config_af_params.mpc_onoff == 1) + brcmf_set_mpc(ndev, 1); + + return ack; +} + +/** + * brcmf_p2p_notify_rx_mgmt_p2p_probereq() - Event handler for p2p probe req. + * + * @ifp: interface pointer for which event was received. + * @e: even message. + * @data: payload of event message (probe request). + */ +s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data) +{ + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + struct wireless_dev *wdev; + struct brcmf_cfg80211_vif *vif = ifp->vif; + struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data; + u16 chanspec = be16_to_cpu(rxframe->chanspec); + u8 *mgmt_frame; + u32 mgmt_frame_len; + s32 freq; + u16 mgmt_type; + + brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code, + e->reason); + + if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) && + (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) { + afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec); + brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n", + afx_hdl->peer_chan); + complete(&afx_hdl->act_frm_scan); + } + + /* Firmware sends us two proberesponses for each idx one. At the */ + /* moment anything but bsscfgidx 0 is passed up to supplicant */ + if (e->bsscfgidx == 0) + return 0; + + /* Filter any P2P probe reqs arriving during the GO-NEG Phase */ + if (test_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status)) { + brcmf_dbg(INFO, "Filtering P2P probe_req in GO-NEG phase\n"); + return 0; + } + + /* Check if wpa_supplicant has registered for this frame */ + brcmf_dbg(INFO, "vif->mgmt_rx_reg %04x\n", vif->mgmt_rx_reg); + mgmt_type = (IEEE80211_STYPE_PROBE_REQ & IEEE80211_FCTL_STYPE) >> 4; + if ((vif->mgmt_rx_reg & BIT(mgmt_type)) == 0) + return 0; + + mgmt_frame = (u8 *)(rxframe + 1); + mgmt_frame_len = e->datalen - sizeof(*rxframe); + freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec), + CHSPEC_IS2G(chanspec) ? 
+ IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ); + wdev = ifp->ndev->ieee80211_ptr; + cfg80211_rx_mgmt(wdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); + + brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n", + mgmt_frame_len, e->datalen, chanspec, freq); + + return 0; +} + + +/** + * brcmf_p2p_attach() - attach for P2P. + * + * @cfg: driver private data for cfg80211 interface. + */ +s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg) +{ + struct brcmf_if *pri_ifp; + struct brcmf_if *p2p_ifp; + struct brcmf_cfg80211_vif *p2p_vif; + struct brcmf_p2p_info *p2p; + struct brcmf_pub *drvr; + s32 bssidx; + s32 err = 0; + + p2p = &cfg->p2p; + p2p->cfg = cfg; + + drvr = cfg->pub; + + pri_ifp = drvr->iflist[0]; + p2p_ifp = drvr->iflist[1]; + + p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; + + if (p2p_ifp) { + p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE, + false); + if (IS_ERR(p2p_vif)) { + brcmf_err("could not create discovery vif\n"); + err = -ENOMEM; + goto exit; + } + + p2p_vif->ifp = p2p_ifp; + p2p_ifp->vif = p2p_vif; + p2p_vif->wdev.netdev = p2p_ifp->ndev; + p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev; + SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy)); + + p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif; + + brcmf_p2p_generate_bss_mac(p2p); + brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr); + + /* Initialize P2P Discovery in the firmware */ + err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); + if (err < 0) { + brcmf_err("set p2p_disc error\n"); + brcmf_free_vif(p2p_vif); + goto exit; + } + /* obtain bsscfg index for P2P discovery */ + err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx); + if (err < 0) { + brcmf_err("retrieving discover bsscfg index failed\n"); + brcmf_free_vif(p2p_vif); + goto exit; + } + /* Verify that firmware uses same bssidx as driver !! */ + if (p2p_ifp->bssidx != bssidx) { + brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n", + bssidx, p2p_ifp->bssidx); + brcmf_free_vif(p2p_vif); + goto exit; + } + + init_completion(&p2p->send_af_done); + INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler); + init_completion(&p2p->afx_hdl.act_frm_scan); + init_completion(&p2p->wait_next_af); + } +exit: + return err; +} + + +/** + * brcmf_p2p_detach() - detach P2P. + * + * @p2p: P2P specific data. + */ +void brcmf_p2p_detach(struct brcmf_p2p_info *p2p) +{ + struct brcmf_cfg80211_vif *vif; + + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + if (vif != NULL) { + brcmf_p2p_cancel_remain_on_channel(vif->ifp); + brcmf_p2p_deinit_discovery(p2p); + /* remove discovery interface */ + brcmf_free_vif(vif); + p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; + } + /* just set it all to zero */ + memset(p2p, 0, sizeof(*p2p)); +} + +/** + * brcmf_p2p_get_current_chanspec() - Get current operation channel. + * + * @p2p: P2P specific data. + * @chanspec: chanspec to be returned. + */ +static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p, + u16 *chanspec) +{ + struct brcmf_if *ifp; + struct brcmf_fil_chan_info_le ci; + s32 err; + + ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + + *chanspec = 11 & WL_CHANSPEC_CHAN_MASK; + + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci)); + if (!err) { + *chanspec = le32_to_cpu(ci.hw_channel) & WL_CHANSPEC_CHAN_MASK; + if (*chanspec < CH_MAX_2G_CHANNEL) + *chanspec |= WL_CHANSPEC_BAND_2G; + else + *chanspec |= WL_CHANSPEC_BAND_5G; + } + *chanspec |= WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE; +} + +/** + * Change a P2P Role. 
+ * Parameters: + * @mac: MAC address of the BSS to change a role + * Returns 0 if success. + */ +int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, + enum brcmf_fil_p2p_if_types if_type) +{ + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_cfg80211_vif *vif; + struct brcmf_fil_p2p_if_le if_request; + s32 err; + u16 chanspec; + + brcmf_dbg(TRACE, "Enter\n"); + + vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + if (!vif) { + brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n"); + return -EPERM; + } + brcmf_notify_escan_complete(cfg, vif->ifp->ndev, true, true); + vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; + if (!vif) { + brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n"); + return -EPERM; + } + brcmf_set_mpc(vif->ifp->ndev, 0); + + /* In concurrency case, STA may be already associated in a particular */ + /* channel. so retrieve the current channel of primary interface and */ + /* then start the virtual interface on that. */ + brcmf_p2p_get_current_chanspec(p2p, &chanspec); + + if_request.type = cpu_to_le16((u16)if_type); + if_request.chspec = cpu_to_le16(chanspec); + memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr)); + + brcmf_cfg80211_arm_vif_event(cfg, vif); + err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request, + sizeof(if_request)); + if (err) { + brcmf_err("p2p_ifupd FAILED, err=%d\n", err); + brcmf_cfg80211_arm_vif_event(cfg, NULL); + return err; + } + err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE, + msecs_to_jiffies(1500)); + brcmf_cfg80211_arm_vif_event(cfg, NULL); + if (!err) { + brcmf_err("No BRCMF_E_IF_CHANGE event received\n"); + return -EIO; + } + + err = brcmf_fil_cmd_int_set(vif->ifp, BRCMF_C_SET_SCB_TIMEOUT, + BRCMF_SCB_TIMEOUT_VALUE); + + return err; +} + +static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p, + struct brcmf_if *ifp, u8 ea[ETH_ALEN], + enum brcmf_fil_p2p_if_types iftype) +{ + struct brcmf_fil_p2p_if_le if_request; + int err; + u16 chanspec; + + /* we need a default channel */ + brcmf_p2p_get_current_chanspec(p2p, &chanspec); + + /* fill the firmware request */ + memcpy(if_request.addr, ea, ETH_ALEN); + if_request.type = cpu_to_le16((u16)iftype); + if_request.chspec = cpu_to_le16(chanspec); + + err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request, + sizeof(if_request)); + if (err) + return err; + + return err; +} + +static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif) +{ + struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev); + struct net_device *pri_ndev = cfg_to_ndev(cfg); + struct brcmf_if *ifp = netdev_priv(pri_ndev); + u8 *addr = vif->wdev.netdev->dev_addr; + + return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN); +} + +static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif) +{ + struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev); + struct net_device *pri_ndev = cfg_to_ndev(cfg); + struct brcmf_if *ifp = netdev_priv(pri_ndev); + u8 *addr = vif->wdev.netdev->dev_addr; + + return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN); +} + +/** + * brcmf_p2p_add_vif() - create a new P2P virtual interface. + * + * @wiphy: wiphy device of new interface. + * @name: name of the new interface. + * @type: nl80211 interface type. 
+ * @flags: TBD + * @params: TBD + */ +struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_cfg80211_vif *vif; + enum brcmf_fil_p2p_if_types iftype; + enum wl_mode mode; + int err; + + if (brcmf_cfg80211_vif_event_armed(cfg)) + return ERR_PTR(-EBUSY); + + brcmf_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, type); + + switch (type) { + case NL80211_IFTYPE_P2P_CLIENT: + iftype = BRCMF_FIL_P2P_IF_CLIENT; + mode = WL_MODE_BSS; + break; + case NL80211_IFTYPE_P2P_GO: + iftype = BRCMF_FIL_P2P_IF_GO; + mode = WL_MODE_AP; + break; + default: + return ERR_PTR(-EOPNOTSUPP); + } + + vif = brcmf_alloc_vif(cfg, type, false); + if (IS_ERR(vif)) + return (struct wireless_dev *)vif; + brcmf_cfg80211_arm_vif_event(cfg, vif); + + err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr, + iftype); + if (err) { + brcmf_cfg80211_arm_vif_event(cfg, NULL); + goto fail; + } + + /* wait for firmware event */ + err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD, + msecs_to_jiffies(1500)); + brcmf_cfg80211_arm_vif_event(cfg, NULL); + if (!err) { + brcmf_err("timeout occurred\n"); + err = -EIO; + goto fail; + } + + /* interface created in firmware */ + ifp = vif->ifp; + if (!ifp) { + brcmf_err("no if pointer provided\n"); + err = -ENOENT; + goto fail; + } + + strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1); + err = brcmf_net_attach(ifp, true); + if (err) { + brcmf_err("Registering netdevice failed\n"); + goto fail; + } + cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif; + /* Disable firmware roaming for P2P interface */ + brcmf_fil_iovar_int_set(ifp, "roam_off", 1); + if (iftype == BRCMF_FIL_P2P_IF_GO) { + /* set station timeout for p2p */ + brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCB_TIMEOUT, + BRCMF_SCB_TIMEOUT_VALUE); + } + return &ifp->vif->wdev; + +fail: + brcmf_free_vif(vif); + return ERR_PTR(err); +} + +/** + * brcmf_p2p_del_vif() - delete a P2P virtual interface. + * + * @wiphy: wiphy device of interface. + * @wdev: wireless device of interface. + * + * TODO: not yet supported. 
+ */ +int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_cfg80211_vif *vif; + unsigned long jiffie_timeout = msecs_to_jiffies(1500); + bool wait_for_disable = false; + int err; + + brcmf_dbg(TRACE, "delete P2P vif\n"); + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_P2P_CLIENT: + if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state)) + wait_for_disable = true; + break; + + case NL80211_IFTYPE_P2P_GO: + if (!brcmf_p2p_disable_p2p_if(vif)) + wait_for_disable = true; + break; + + case NL80211_IFTYPE_P2P_DEVICE: + default: + return -ENOTSUPP; + break; + } + + clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n"); + + if (wait_for_disable) + wait_for_completion_timeout(&cfg->vif_disabled, + msecs_to_jiffies(500)); + + brcmf_vif_clear_mgmt_ies(vif); + + brcmf_cfg80211_arm_vif_event(cfg, vif); + err = brcmf_p2p_release_p2p_if(vif); + if (!err) { + /* wait for firmware event */ + err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL, + jiffie_timeout); + if (!err) + err = -EIO; + else + err = 0; + } + brcmf_cfg80211_arm_vif_event(cfg, NULL); + brcmf_free_vif(vif); + p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; + + return err; +} diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h new file mode 100644 index 000000000000..6821b26224be --- /dev/null +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef WL_CFGP2P_H_ +#define WL_CFGP2P_H_ + +#include <net/cfg80211.h> + +struct brcmf_cfg80211_info; + +/** + * enum p2p_bss_type - different type of BSS configurations. + * + * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg. + * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg. + * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg. + * @P2PAPI_BSSCFG_MAX: used for range checking. + */ +enum p2p_bss_type { + P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */ + P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */ + P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */ + P2PAPI_BSSCFG_MAX +}; + +/** + * struct p2p_bss - peer-to-peer bss related information. + * + * @vif: virtual interface of this P2P bss. + * @private_data: TBD + */ +struct p2p_bss { + struct brcmf_cfg80211_vif *vif; + void *private_data; +}; + +/** + * enum brcmf_p2p_status - P2P specific dongle status. + * + * @BRCMF_P2P_STATUS_IF_ADD: peer-to-peer vif add sent to dongle. + * @BRCMF_P2P_STATUS_IF_DEL: NOT-USED? 
+ * @BRCMF_P2P_STATUS_IF_DELETING: peer-to-peer vif delete sent to dongle. + * @BRCMF_P2P_STATUS_IF_CHANGING: peer-to-peer vif change sent to dongle. + * @BRCMF_P2P_STATUS_IF_CHANGED: peer-to-peer vif change completed on dongle. + * @BRCMF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed. + * @BRCMF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked. + * @BRCMF_P2P_STATUS_GO_NEG_PHASE: P2P GO negotiation ongoing. + * @BRCMF_P2P_STATUS_DISCOVER_LISTEN: P2P listen, remaining on channel. + * @BRCMF_P2P_STATUS_SENDING_ACT_FRAME: In the process of sending action frame. + * @BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx. + * @BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME: waiting for action frame response. + * @BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL: search channel for AF active. + */ +enum brcmf_p2p_status { + BRCMF_P2P_STATUS_ENABLED, + BRCMF_P2P_STATUS_IF_ADD, + BRCMF_P2P_STATUS_IF_DEL, + BRCMF_P2P_STATUS_IF_DELETING, + BRCMF_P2P_STATUS_IF_CHANGING, + BRCMF_P2P_STATUS_IF_CHANGED, + BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, + BRCMF_P2P_STATUS_ACTION_TX_NOACK, + BRCMF_P2P_STATUS_GO_NEG_PHASE, + BRCMF_P2P_STATUS_DISCOVER_LISTEN, + BRCMF_P2P_STATUS_SENDING_ACT_FRAME, + BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN, + BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, + BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL +}; + +/** + * struct afx_hdl - action frame off channel storage. + * + * @afx_work: worker thread for searching channel + * @act_frm_scan: thread synchronizing struct. + * @is_active: channel searching active. + * @peer_chan: current channel. + * @is_listen: sets mode for afx worker. + * @my_listen_chan: this peers listen channel. + * @peer_listen_chan: remote peers listen channel. + * @tx_dst_addr: mac address where tx af should be sent to. + */ +struct afx_hdl { + struct work_struct afx_work; + struct completion act_frm_scan; + bool is_active; + s32 peer_chan; + bool is_listen; + u16 my_listen_chan; + u16 peer_listen_chan; + u8 tx_dst_addr[ETH_ALEN]; +}; + +/** + * struct brcmf_p2p_info - p2p specific driver information. + * + * @cfg: driver private data for cfg80211 interface. + * @status: status of P2P (see enum brcmf_p2p_status). + * @dev_addr: P2P device address. + * @int_addr: P2P interface address. + * @bss_idx: informate for P2P bss types. + * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state. + * @ssid: ssid for P2P GO. + * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state. + * @remain_on_channel: contains copy of struct used by cfg80211. + * @remain_on_channel_cookie: cookie counter for remain on channel cmd + * @next_af_subtype: expected action frame subtype. + * @send_af_done: indication that action frame tx is complete. + * @afx_hdl: action frame search handler info. + * @af_sent_channel: channel action frame is sent. + * @af_tx_sent_jiffies: jiffies time when af tx was transmitted. + * @wait_next_af: thread synchronizing struct. + * @gon_req_action: about to send go negotiation requets frame. + * @block_gon_req_tx: drop tx go negotiation requets frame. 
+ */ +struct brcmf_p2p_info { + struct brcmf_cfg80211_info *cfg; + unsigned long status; + u8 dev_addr[ETH_ALEN]; + u8 int_addr[ETH_ALEN]; + struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX]; + struct timer_list listen_timer; + struct brcmf_ssid ssid; + u8 listen_channel; + struct ieee80211_channel remain_on_channel; + u32 remain_on_channel_cookie; + u8 next_af_subtype; + struct completion send_af_done; + struct afx_hdl afx_hdl; + u32 af_sent_channel; + unsigned long af_tx_sent_jiffies; + struct completion wait_next_af; + bool gon_req_action; + bool block_gon_req_tx; +}; + +s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg); +void brcmf_p2p_detach(struct brcmf_p2p_info *p2p); +struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); +int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); +int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, + enum brcmf_fil_p2p_if_types if_type); +int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev); +void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev); +int brcmf_p2p_scan_prep(struct wiphy *wiphy, + struct cfg80211_scan_request *request, + struct brcmf_cfg80211_vif *vif); +int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + struct ieee80211_channel *channel, + unsigned int duration, u64 *cookie); +int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data); +void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp); +int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data); +int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data); +bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, + struct net_device *ndev, + struct brcmf_fil_af_params_le *af_params); +bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, + struct brcmf_bss_info_le *bi); +s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data); +#endif /* WL_CFGP2P_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c index b1bb46c49799..14be2d5530ce 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c @@ -15,8 +15,6 @@ */ /* ***** SDIO interface chip backplane handle functions ***** */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/types.h> #include <linux/netdevice.h> #include <linux/mmc/card.h> diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 914c56fe6c5f..42289e9ea886 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -354,11 +354,10 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize) int i; struct brcmf_usbreq *req, *reqs; - reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC); - if (reqs == NULL) { - brcmf_err("fail to allocate memory!\n"); + reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC); + if (reqs == NULL) return NULL; - } + req = reqs; for (i = 0; i < qsize; i++) { @@ -421,10 +420,6 @@ static void brcmf_usb_tx_complete(struct urb *urb) brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status, req->skb); brcmf_usb_del_fromq(devinfo, req); - if (urb->status == 
0) - devinfo->bus_pub.bus->dstats.tx_packets++; - else - devinfo->bus_pub.bus->dstats.tx_errors++; brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); @@ -443,30 +438,25 @@ static void brcmf_usb_rx_complete(struct urb *urb) struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; struct brcmf_usbdev_info *devinfo = req->devinfo; struct sk_buff *skb; - int ifidx = 0; + struct sk_buff_head skbq; brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status); brcmf_usb_del_fromq(devinfo, req); skb = req->skb; req->skb = NULL; - if (urb->status == 0) { - devinfo->bus_pub.bus->dstats.rx_packets++; - } else { - devinfo->bus_pub.bus->dstats.rx_errors++; + /* zero lenght packets indicate usb "failure". Do not refill */ + if (urb->status != 0 || !urb->actual_length) { brcmu_pkt_buf_free_skb(skb); brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL); return; } if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) { + skb_queue_head_init(&skbq); + skb_queue_tail(&skbq, skb); skb_put(skb, urb->actual_length); - if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) { - brcmf_err("rx protocol error\n"); - brcmu_pkt_buf_free_skb(skb); - devinfo->bus_pub.bus->dstats.rx_errors++; - } else - brcmf_rx_packet(devinfo->dev, ifidx, skb); + brcmf_rx_frames(devinfo->dev, &skbq); brcmf_usb_rx_refill(devinfo, req); } else { brcmu_pkt_buf_free_skb(skb); @@ -1259,6 +1249,8 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) bus->bus_priv.usb = bus_pub; dev_set_drvdata(dev, bus); bus->ops = &brcmf_usb_bus_ops; + bus->chip = bus_pub->devid; + bus->chiprev = bus_pub->chiprev; /* Attach to the common driver interface */ ret = brcmf_attach(0, dev); @@ -1520,10 +1512,23 @@ static void brcmf_release_fw(struct list_head *q) } } +static int brcmf_usb_reset_device(struct device *dev, void *notused) +{ + /* device past is the usb interface so we + * need to use parent here. + */ + brcmf_dev_reset(dev->parent); + return 0; +} void brcmf_usb_exit(void) { + struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver; + int ret; + brcmf_dbg(USB, "Enter\n"); + ret = driver_for_each_device(drv, NULL, NULL, + brcmf_usb_reset_device); usb_deregister(&brcmf_usbdrvr); brcmf_release_fw(&fw_image_list); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 75464ad4fbd1..cecc3eff72e9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -16,8 +16,6 @@ /* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. 
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> #include <linux/etherdevice.h> #include <net/cfg80211.h> @@ -28,6 +26,8 @@ #include <brcmu_wifi.h> #include "dhd.h" #include "dhd_dbg.h" +#include "fwil_types.h" +#include "p2p.h" #include "wl_cfg80211.h" #include "fwil.h" @@ -43,16 +43,13 @@ #define BRCMF_PNO_SCAN_COMPLETE 1 #define BRCMF_PNO_SCAN_INCOMPLETE 0 -#define BRCMF_IFACE_MAX_CNT 2 +#define BRCMF_IFACE_MAX_CNT 3 -#define TLV_LEN_OFF 1 /* length offset */ -#define TLV_HDR_LEN 2 /* header length */ -#define TLV_BODY_OFF 2 /* body offset */ -#define TLV_OUI_LEN 3 /* oui id length */ #define WPA_OUI "\x00\x50\xF2" /* WPA OUI */ #define WPA_OUI_TYPE 1 #define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */ #define WME_OUI_TYPE 2 +#define WPS_OUI_TYPE 4 #define VS_IE_FIXED_HDR_LEN 6 #define WPA_IE_VERSION_LEN 2 @@ -78,13 +75,15 @@ #define VNDR_IE_PKTFLAG_OFFSET 8 #define VNDR_IE_VSIE_OFFSET 12 #define VNDR_IE_HDR_SIZE 12 -#define VNDR_IE_BEACON_FLAG 0x1 -#define VNDR_IE_PRBRSP_FLAG 0x2 -#define MAX_VNDR_IE_NUMBER 5 +#define VNDR_IE_PARSE_LIMIT 5 #define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */ #define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */ +#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320 +#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400 +#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS 20 + #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) @@ -273,13 +272,6 @@ static const u32 __wl_cipher_suites[] = { WLAN_CIPHER_SUITE_AES_CMAC, }; -/* tag_ID/length/value_buffer tuple */ -struct brcmf_tlv { - u8 id; - u8 len; - u8 data[1]; -}; - /* Vendor specific ie. id = 221, oui and type defines exact ie */ struct brcmf_vs_tlv { u8 id; @@ -296,7 +288,7 @@ struct parsed_vndr_ie_info { struct parsed_vndr_ies { u32 count; - struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER]; + struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT]; }; /* Quarter dBm units to mW @@ -383,7 +375,7 @@ static u8 brcmf_mw_to_qdbm(u16 mw) return qdbm; } -static u16 channel_to_chanspec(struct ieee80211_channel *ch) +u16 channel_to_chanspec(struct ieee80211_channel *ch) { u16 chanspec; @@ -395,19 +387,92 @@ static u16 channel_to_chanspec(struct ieee80211_channel *ch) else chanspec |= WL_CHANSPEC_BAND_5G; - if (ch->flags & IEEE80211_CHAN_NO_HT40) { - chanspec |= WL_CHANSPEC_BW_20; - chanspec |= WL_CHANSPEC_CTL_SB_NONE; - } else { - chanspec |= WL_CHANSPEC_BW_40; - if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS) - chanspec |= WL_CHANSPEC_CTL_SB_LOWER; - else - chanspec |= WL_CHANSPEC_CTL_SB_UPPER; - } + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + return chanspec; } +/* Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + */ +struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key) +{ + struct brcmf_tlv *elt; + int totlen; + + elt = (struct brcmf_tlv *)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + int len = elt->len; + + /* validate remaining totlen */ + if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN))) + return elt; + + elt = (struct brcmf_tlv *)((u8 *)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + +/* Is any of the tlvs the expected entry? If + * not update the tlvs buffer pointer/length. 
+ */ +static bool +brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, + u8 *oui, u32 oui_len, u8 type) +{ + /* If the contents match the OUI and the type */ + if (ie[TLV_LEN_OFF] >= oui_len + 1 && + !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) && + type == ie[TLV_BODY_OFF + oui_len]) { + return true; + } + + if (tlvs == NULL) + return false; + /* point to the next ie */ + ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + + return false; +} + +static struct brcmf_vs_tlv * +brcmf_find_wpaie(u8 *parse, u32 len) +{ + struct brcmf_tlv *ie; + + while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { + if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len, + WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE)) + return (struct brcmf_vs_tlv *)ie; + } + return NULL; +} + +static struct brcmf_vs_tlv * +brcmf_find_wpsie(u8 *parse, u32 len) +{ + struct brcmf_tlv *ie; + + while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { + if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len, + WPA_OUI, TLV_OUI_LEN, WPS_OUI_TYPE)) + return (struct brcmf_vs_tlv *)ie; + } + return NULL; +} + + static void convert_key_from_CPU(struct brcmf_wsec_key *key, struct brcmf_wsec_key_le *key_le) { @@ -440,11 +505,153 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key) return err; } +static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, + const char *name, + enum nl80211_iftype type, + u32 *flags, + struct vif_params *params) +{ + brcmf_dbg(TRACE, "enter: %s type %d\n", name, type); + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_MESH_POINT: + return ERR_PTR(-EOPNOTSUPP); + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + return brcmf_p2p_add_vif(wiphy, name, type, flags, params); + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_P2P_DEVICE: + default: + return ERR_PTR(-EINVAL); + } +} + +void brcmf_set_mpc(struct net_device *ndev, int mpc) +{ + struct brcmf_if *ifp = netdev_priv(ndev); + s32 err = 0; + + if (check_vif_up(ifp->vif)) { + err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc); + if (err) { + brcmf_err("fail to set mpc\n"); + return; + } + brcmf_dbg(INFO, "MPC : %d\n", mpc); + } +} + +s32 +brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, + struct net_device *ndev, + bool aborted, bool fw_abort) +{ + struct brcmf_scan_params_le params_le; + struct cfg80211_scan_request *scan_request; + s32 err = 0; + + brcmf_dbg(SCAN, "Enter\n"); + + /* clear scan request, because the FW abort can cause a second call */ + /* to this function and might cause a double cfg80211_scan_done */ + scan_request = cfg->scan_request; + cfg->scan_request = NULL; + + if (timer_pending(&cfg->escan_timeout)) + del_timer_sync(&cfg->escan_timeout); + + if (fw_abort) { + /* Do a scan abort to stop the driver's scan engine */ + brcmf_dbg(SCAN, "ABORT scan in firmware\n"); + memset(&params_le, 0, sizeof(params_le)); + memset(params_le.bssid, 0xFF, ETH_ALEN); + params_le.bss_type = DOT11_BSSTYPE_ANY; + params_le.scan_type = 0; + params_le.channel_num = cpu_to_le32(1); + params_le.nprobes = cpu_to_le32(1); + params_le.active_time = cpu_to_le32(-1); + params_le.passive_time = cpu_to_le32(-1); + params_le.home_time = cpu_to_le32(-1); + /* Scan is aborted by setting channel_list[0] to -1 */
+ params_le.channel_list[0] = cpu_to_le16(-1); + /* E-Scan (or any other type) can be aborted by SCAN */ + err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN, + &params_le, sizeof(params_le)); + if (err) + brcmf_err("Scan abort failed\n"); + } + /* + * e-scan can be initiated by scheduled scan + * which takes precedence. + */ + if (cfg->sched_escan) { + brcmf_dbg(SCAN, "scheduled scan completed\n"); + cfg->sched_escan = false; + if (!aborted) + cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); + brcmf_set_mpc(ndev, 1); + } else if (scan_request) { + brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", + aborted ? "Aborted" : "Done"); + cfg80211_scan_done(scan_request, aborted); + brcmf_set_mpc(ndev, 1); + } + if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) + brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n"); + + return err; +} + +static +int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct net_device *ndev = wdev->netdev; + + /* vif event pending in firmware */ + if (brcmf_cfg80211_vif_event_armed(cfg)) + return -EBUSY; + + if (ndev) { + if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) && + cfg->escan_info.ndev == ndev) + brcmf_notify_escan_complete(cfg, ndev, true, + true); + + brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1); + } + + switch (wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_MESH_POINT: + return -EOPNOTSUPP; + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + return brcmf_p2p_del_vif(wiphy, wdev); + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_P2P_DEVICE: + default: + return -EINVAL; + } + return -EOPNOTSUPP; +} + static s32 brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { + struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_vif *vif = ifp->vif; s32 infra = 0; @@ -464,10 +671,23 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, infra = 0; break; case NL80211_IFTYPE_STATION: + /* Ignore change for p2p IF. Unclear why supplicant does this */ + if ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) || + (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) { + brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n"); + /* WAR: It is unexpected to get a change of VIF for P2P + * IF, but it happens. The request cannot be handled + * but returning EPERM causes a crash.
Returning 0 + without setting ieee80211_ptr->iftype causes trace + * (WARN_ON) but it works with wpa_supplicant + */ + return 0; + } vif->mode = WL_MODE_BSS; infra = 1; break; case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: vif->mode = WL_MODE_AP; ap = 1; break; @@ -477,8 +697,14 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, } if (ap) { - set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state); - brcmf_dbg(INFO, "IF Type = AP\n"); + if (type == NL80211_IFTYPE_P2P_GO) { + brcmf_dbg(INFO, "IF Type = P2P GO\n"); + err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO); + } + if (!err) { + set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state); + brcmf_dbg(INFO, "IF Type = AP\n"); + } } else { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra); if (err) { @@ -497,21 +723,6 @@ done: return err; } -static void brcmf_set_mpc(struct net_device *ndev, int mpc) -{ - struct brcmf_if *ifp = netdev_priv(ndev); - s32 err = 0; - - if (check_vif_up(ifp->vif)) { - err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc); - if (err) { - brcmf_err("fail to set mpc\n"); - return; - } - brcmf_dbg(INFO, "MPC : %d\n", mpc); - } -} - static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le, struct cfg80211_scan_request *request) { @@ -592,69 +803,6 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le, } static s32 -brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, - struct net_device *ndev, - bool aborted, bool fw_abort) -{ - struct brcmf_scan_params_le params_le; - struct cfg80211_scan_request *scan_request; - s32 err = 0; - - brcmf_dbg(SCAN, "Enter\n"); - - /* clear scan request, because the FW abort can cause a second call */ - /* to this functon and might cause a double cfg80211_scan_done */ - scan_request = cfg->scan_request; - cfg->scan_request = NULL; - - if (timer_pending(&cfg->escan_timeout)) - del_timer_sync(&cfg->escan_timeout); - - if (fw_abort) { - /* Do a scan abort to stop the driver's scan engine */ - brcmf_dbg(SCAN, "ABORT scan in firmware\n"); - memset(&params_le, 0, sizeof(params_le)); - memset(params_le.bssid, 0xFF, ETH_ALEN); - params_le.bss_type = DOT11_BSSTYPE_ANY; - params_le.scan_type = 0; - params_le.channel_num = cpu_to_le32(1); - params_le.nprobes = cpu_to_le32(1); - params_le.active_time = cpu_to_le32(-1); - params_le.passive_time = cpu_to_le32(-1); - params_le.home_time = cpu_to_le32(-1); - /* Scan is aborted by setting channel_list[0] to -1 */ - params_le.channel_list[0] = cpu_to_le16(-1); - /* E-Scan (or anyother type) can be aborted by SCAN */ - err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN, - &params_le, sizeof(params_le)); - if (err) - brcmf_err("Scan abort failed\n"); - } - /* - * e-scan can be initiated by scheduled scan - * which takes precedence. - */ - if (cfg->sched_escan) { - brcmf_dbg(SCAN, "scheduled scan completed\n"); - cfg->sched_escan = false; - if (!aborted) - cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); - brcmf_set_mpc(ndev, 1); - } else if (scan_request) { - brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", - aborted ?
"Aborted" : "Done"); - cfg80211_scan_done(scan_request, aborted); - brcmf_set_mpc(ndev, 1); - } - if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { - brcmf_err("Scan complete while device not scanning\n"); - return -EPERM; - } - - return err; -} - -static s32 brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, struct cfg80211_scan_request *request, u16 action) { @@ -705,11 +853,12 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy, s32 err; u32 passive_scan; struct brcmf_scan_results *results; + struct escan_info *escan = &cfg->escan_info; brcmf_dbg(SCAN, "Enter\n"); - cfg->escan_info.ndev = ndev; - cfg->escan_info.wiphy = wiphy; - cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING; + escan->ndev = ndev; + escan->wiphy = wiphy; + escan->escan_state = WL_ESCAN_STATE_SCANNING; passive_scan = cfg->active_scan ? 0 : 1; err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN, passive_scan); @@ -723,7 +872,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy, results->count = 0; results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE; - err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START); + err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START); if (err) brcmf_set_mpc(ndev, 1); return err; @@ -760,6 +909,12 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev, return -EAGAIN; } + /* If scan req comes for p2p0, send it over primary I/F */ + if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) { + ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + ndev = ifp->ndev; + } + /* Arm scan timeout timer */ mod_timer(&cfg->escan_timeout, jiffies + WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000); @@ -778,6 +933,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev, cfg->scan_request = request; set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); if (escan_req) { + cfg->escan_info.run = brcmf_run_escan; + err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif); + if (err) + goto scan_out; + err = brcmf_do_escan(cfg, wiphy, ndev, request); if (err) goto scan_out; @@ -935,31 +1095,6 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof) memset(prof, 0, sizeof(*prof)); } -static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params, - size_t *join_params_size) -{ - u16 chanspec = 0; - - if (ch != 0) { - if (ch <= CH_MAX_2G_CHANNEL) - chanspec |= WL_CHANSPEC_BAND_2G; - else - chanspec |= WL_CHANSPEC_BAND_5G; - - chanspec |= WL_CHANSPEC_BW_20; - chanspec |= WL_CHANSPEC_CTL_SB_NONE; - - *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE + - sizeof(u16); - - chanspec |= (ch & WL_CHANSPEC_CHAN_MASK); - join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec); - join_params->params_le.chanspec_num = cpu_to_le32(1); - - brcmf_dbg(CONN, "channel %d, chanspec %#X\n", ch, chanspec); - } -} - static void brcmf_link_down(struct brcmf_cfg80211_vif *vif) { s32 err = 0; @@ -990,6 +1125,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, s32 err = 0; s32 wsec = 0; s32 bcnprd; + u16 chanspec; brcmf_dbg(TRACE, "Enter\n"); if (!check_vif_up(ifp->vif)) @@ -1093,8 +1229,11 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, params->chandef.chan->center_freq); if (params->channel_fixed) { /* adding chanspec */ - brcmf_ch_to_chanspec(cfg->channel, - &join_params, &join_params_size); + chanspec = channel_to_chanspec(params->chandef.chan); + join_params.params_le.chanspec_list[0] = + cpu_to_le16(chanspec); + 
join_params.params_le.chanspec_num = cpu_to_le32(1); + join_params_size += sizeof(join_params.params_le); } /* set channel for starter */ @@ -1157,7 +1296,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev, else val = WPA_AUTH_DISABLED; brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val); - err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val); + err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val); if (err) { brcmf_err("set wpa_auth failed (%d)\n", err); return err; @@ -1196,7 +1335,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev, break; } - err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val); + err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val); if (err) { brcmf_err("set auth failed (%d)\n", err); return err; @@ -1260,7 +1399,12 @@ brcmf_set_set_cipher(struct net_device *ndev, } brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval); - err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval); + /* In case of privacy, but no security and WPS then simulate */ + /* setting AES. WPS-2.0 allows no security */ + if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval && + sme->privacy) + pval = AES_ENABLED; + err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval); if (err) { brcmf_err("error (%d)\n", err); return err; @@ -1282,8 +1426,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) s32 err = 0; if (sme->crypto.n_akm_suites) { - err = brcmf_fil_iovar_int_get(netdev_priv(ndev), - "wpa_auth", &val); + err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), + "wpa_auth", &val); if (err) { brcmf_err("could not get wpa_auth (%d)\n", err); return err; @@ -1317,8 +1461,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) } brcmf_dbg(CONN, "setting wpa_auth to %d\n", val); - err = brcmf_fil_iovar_int_set(netdev_priv(ndev), - "wpa_auth", val); + err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), + "wpa_auth", val); if (err) { brcmf_err("could not set wpa_auth (%d)\n", err); return err; @@ -1395,9 +1539,28 @@ brcmf_set_sharedkey(struct net_device *ndev, return err; } +static +enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp, + enum nl80211_auth_type type) +{ + u32 ci; + if (type == NL80211_AUTHTYPE_AUTOMATIC) { + /* shift to ignore chip revision */ + ci = brcmf_get_chip_info(ifp) >> 4; + switch (ci) { + case 43236: + brcmf_dbg(CONN, "43236 WAR: use OPEN instead of AUTO\n"); + return NL80211_AUTHTYPE_OPEN_SYSTEM; + default: + break; + } + } + return type; +} + static s32 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_connect_params *sme) + struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); @@ -1405,7 +1568,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct ieee80211_channel *chan = sme->channel; struct brcmf_join_params join_params; size_t join_params_size; - struct brcmf_ssid ssid; + struct brcmf_tlv *rsn_ie; + struct brcmf_vs_tlv *wpa_ie; + void *ie; + u32 ie_len; + struct brcmf_ext_join_params_le *ext_join_params; + u16 chanspec; s32 err = 0; @@ -1418,15 +1586,46 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, return -EOPNOTSUPP; } + if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) { + /* A normal (non P2P) connection request setup. 
*/ + ie = NULL; + ie_len = 0; + /* find the WPA_IE */ + wpa_ie = brcmf_find_wpaie((u8 *)sme->ie, sme->ie_len); + if (wpa_ie) { + ie = wpa_ie; + ie_len = wpa_ie->len + TLV_HDR_LEN; + } else { + /* find the RSN_IE */ + rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len, + WLAN_EID_RSN); + if (rsn_ie) { + ie = rsn_ie; + ie_len = rsn_ie->len + TLV_HDR_LEN; + } + } + brcmf_fil_iovar_data_set(ifp, "wpaie", ie, ie_len); + } + + err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG, + sme->ie, sme->ie_len); + if (err) + brcmf_err("Set Assoc REQ IE Failed\n"); + else + brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n"); + set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); if (chan) { cfg->channel = ieee80211_frequency_to_channel(chan->center_freq); - brcmf_dbg(CONN, "channel (%d), center_req (%d)\n", - cfg->channel, chan->center_freq); - } else + chanspec = channel_to_chanspec(chan); + brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n", + cfg->channel, chan->center_freq, chanspec); + } else { cfg->channel = 0; + chanspec = 0; + } brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len); @@ -1436,6 +1635,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, goto done; } + sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type); err = brcmf_set_auth_type(ndev, sme); if (err) { brcmf_err("wl_set_auth_type failed (%d)\n", err); @@ -1460,27 +1660,88 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, goto done; } + profile->ssid.SSID_len = min_t(u32, (u32)sizeof(profile->ssid.SSID), + (u32)sme->ssid_len); + memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len); + if (profile->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { + profile->ssid.SSID[profile->ssid.SSID_len] = 0; + brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", profile->ssid.SSID, + profile->ssid.SSID_len); + } + + /* Join with specific BSSID and cached SSID + * If SSID is zero join based on BSSID only + */ + join_params_size = offsetof(struct brcmf_ext_join_params_le, assoc_le) + + offsetof(struct brcmf_assoc_params_le, chanspec_list); + if (cfg->channel) + join_params_size += sizeof(u16); + ext_join_params = kzalloc(join_params_size, GFP_KERNEL); + if (ext_join_params == NULL) { + err = -ENOMEM; + goto done; + } + ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len); + memcpy(&ext_join_params->ssid_le.SSID, sme->ssid, + profile->ssid.SSID_len); + /*increase dwell time to receive probe response or detect Beacon + * from target AP at a noisy air only during connect command + */ + ext_join_params->scan_le.active_time = + cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS); + ext_join_params->scan_le.passive_time = + cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS); + /* Set up join scan parameters */ + ext_join_params->scan_le.scan_type = -1; + /* to sync with presence period of VSDB GO. + * Send probe request more frequently. Probe request will be stopped + * when it gets probe response from target AP/GO. 
+ */ + ext_join_params->scan_le.nprobes = + cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS / + BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS); + ext_join_params->scan_le.home_time = cpu_to_le32(-1); + + if (sme->bssid) + memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN); + else + memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN); + + if (cfg->channel) { + ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1); + + ext_join_params->assoc_le.chanspec_list[0] = + cpu_to_le16(chanspec); + } + + err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params, + join_params_size); + kfree(ext_join_params); + if (!err) + /* This is it. join command worked, we are done */ + goto done; + + /* join command failed, fallback to set ssid */ memset(&join_params, 0, sizeof(join_params)); join_params_size = sizeof(join_params.ssid_le); - profile->ssid.SSID_len = min_t(u32, - sizeof(ssid.SSID), (u32)sme->ssid_len); memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len); - memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len); join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len); - memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); - - if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN) - brcmf_dbg(CONN, "ssid \"%s\", len (%d)\n", - ssid.SSID, ssid.SSID_len); + if (sme->bssid) + memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN); + else + memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); - brcmf_ch_to_chanspec(cfg->channel, - &join_params, &join_params_size); + if (cfg->channel) { + join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec); + join_params.params_le.chanspec_num = cpu_to_le32(1); + join_params_size += sizeof(join_params.params_le); + } err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, &join_params, join_params_size); if (err) - brcmf_err("WLC_SET_SSID failed (%d)\n", err); + brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err); done: if (err) @@ -1939,7 +2200,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, goto done; } /* Report the current tx rate */ - err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate); + err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate); if (err) { brcmf_err("Could not get rate (%d)\n", err); goto done; @@ -2011,67 +2272,6 @@ done: return err; } -static s32 -brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev, - const u8 *addr, - const struct cfg80211_bitrate_mask *mask) -{ - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcm_rateset_le rateset_le; - s32 rate; - s32 val; - s32 err_bg; - s32 err_a; - u32 legacy; - s32 err = 0; - - brcmf_dbg(TRACE, "Enter\n"); - if (!check_vif_up(ifp->vif)) - return -EIO; - - /* addr param is always NULL. ignore it */ - /* Get current rateset */ - err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CURR_RATESET, - &rateset_le, sizeof(rateset_le)); - if (err) { - brcmf_err("could not get current rateset (%d)\n", err); - goto done; - } - - legacy = ffs(mask->control[IEEE80211_BAND_2GHZ].legacy & 0xFFFF); - if (!legacy) - legacy = ffs(mask->control[IEEE80211_BAND_5GHZ].legacy & - 0xFFFF); - - val = wl_g_rates[legacy - 1].bitrate * 100000; - - if (val < le32_to_cpu(rateset_le.count)) - /* Select rate by rateset index */ - rate = rateset_le.rates[val] & 0x7f; - else - /* Specified rate in bps */ - rate = val / 500000; - - brcmf_dbg(CONN, "rate %d mbps\n", rate / 2); - - /* - * - * Set rate override, - * Since the is a/b/g-blind, both a/bg_rate are enforced. 
- */ - err_bg = brcmf_fil_iovar_int_set(ifp, "bg_rate", rate); - err_a = brcmf_fil_iovar_int_set(ifp, "a_rate", rate); - if (err_bg && err_a) { - brcmf_err("could not set fixed rate (%d) (%d)\n", err_bg, - err_a); - err = err_bg | err_a; - } - -done: - brcmf_dbg(TRACE, "Exit\n"); - return err; -} - static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, struct brcmf_bss_info_le *bi) { @@ -2123,7 +2323,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, if (!bss) return -ENOMEM; - cfg80211_put_bss(bss); + cfg80211_put_bss(wiphy, bss); return err; } @@ -2229,7 +2429,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg, goto CleanUp; } - cfg80211_put_bss(bss); + cfg80211_put_bss(wiphy, bss); CleanUp: @@ -2245,78 +2445,10 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif) return vif->mode == WL_MODE_IBSS; } -/* - * Traverse a string of 1-byte tag/1-byte length/variable-length value - * triples, returning a pointer to the substring whose first element - * matches tag - */ -static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key) -{ - struct brcmf_tlv *elt; - int totlen; - - elt = (struct brcmf_tlv *) buf; - totlen = buflen; - - /* find tagged parameter */ - while (totlen >= TLV_HDR_LEN) { - int len = elt->len; - - /* validate remaining totlen */ - if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN))) - return elt; - - elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN)); - totlen -= (len + TLV_HDR_LEN); - } - - return NULL; -} - -/* Is any of the tlvs the expected entry? If - * not update the tlvs buffer pointer/length. - */ -static bool -brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, - u8 *oui, u32 oui_len, u8 type) +static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, + struct brcmf_if *ifp) { - /* If the contents match the OUI and the type */ - if (ie[TLV_LEN_OFF] >= oui_len + 1 && - !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) && - type == ie[TLV_BODY_OFF + oui_len]) { - return true; - } - - if (tlvs == NULL) - return false; - /* point to the next ie */ - ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; - /* calculate the length of the rest of the buffer */ - *tlvs_len -= (int)(ie - *tlvs); - /* update the pointer to the start of the buffer */ - *tlvs = ie; - - return false; -} - -static struct brcmf_vs_tlv * -brcmf_find_wpaie(u8 *parse, u32 len) -{ - struct brcmf_tlv *ie; - - while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { - if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len, - WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE)) - return (struct brcmf_vs_tlv *)ie; - } - return NULL; -} - -static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg) -{ - struct net_device *ndev = cfg_to_ndev(cfg); - struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); - struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev); struct brcmf_bss_info_le *bi; struct brcmf_ssid *ssid; struct brcmf_tlv *tim; @@ -2372,7 +2504,7 @@ update_bss_info_out: return err; } -static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg) +void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg) { struct escan_info *escan = &cfg->escan_info; @@ -2391,8 +2523,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work) container_of(work, struct brcmf_cfg80211_info, escan_timeout_work); - brcmf_notify_escan_complete(cfg, - cfg->escan_info.ndev, true, true); + brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); } static void 
brcmf_escan_timeout(unsigned long data) @@ -2469,11 +2600,6 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, brcmf_err("Invalid escan result (NULL pointer)\n"); goto exit; } - if (!cfg->scan_request) { - brcmf_dbg(SCAN, "result without cfg80211 request\n"); - goto exit; - } - if (le16_to_cpu(escan_result_le->bss_count) != 1) { brcmf_err("Invalid bss_count %d: ignoring\n", escan_result_le->bss_count); @@ -2481,6 +2607,14 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, } bss_info_le = &escan_result_le->bss_info_le; + if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le)) + goto exit; + + if (!cfg->scan_request) { + brcmf_dbg(SCAN, "result without cfg80211 request\n"); + goto exit; + } + bi_length = le32_to_cpu(bss_info_le->length); if (bi_length != (le32_to_cpu(escan_result_le->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) { @@ -2519,6 +2653,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, list->count++; } else { cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (brcmf_p2p_scan_finding_common_channel(cfg, NULL)) + goto exit; if (cfg->scan_request) { cfg->bss_list = (struct brcmf_scan_results *) cfg->escan_info.escan_buf; @@ -2527,7 +2663,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, brcmf_notify_escan_complete(cfg, ndev, aborted, false); } else - brcmf_err("Unexpected scan result 0x%x\n", status); + brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n", + status); } exit: return err; @@ -3031,9 +3168,8 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len) } #endif -static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx) +static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) { - struct brcmf_if *ifp = netdev_priv(ndev); s32 err; /* set auth */ @@ -3292,7 +3428,7 @@ brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len, parsed_info->vndrie.oui[2], parsed_info->vndrie.oui_type); - if (vndr_ies->count >= MAX_VNDR_IE_NUMBER) + if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT) break; next: remaining_len -= (ie->len + TLV_HDR_LEN); @@ -3326,7 +3462,6 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd) return ie_len + VNDR_IE_HDR_SIZE; } -static s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, const u8 *vndr_ie_buf, u32 vndr_ie_len) { @@ -3358,24 +3493,28 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, if (!iovar_ie_buf) return -ENOMEM; curr_ie_buf = iovar_ie_buf; - if (ifp->vif->mode == WL_MODE_AP) { - switch (pktflag) { - case VNDR_IE_PRBRSP_FLAG: - mgmt_ie_buf = saved_ie->probe_res_ie; - mgmt_ie_len = &saved_ie->probe_res_ie_len; - mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie); - break; - case VNDR_IE_BEACON_FLAG: - mgmt_ie_buf = saved_ie->beacon_ie; - mgmt_ie_len = &saved_ie->beacon_ie_len; - mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie); - break; - default: - err = -EPERM; - brcmf_err("not suitable type\n"); - goto exit; - } - } else { + switch (pktflag) { + case BRCMF_VNDR_IE_PRBREQ_FLAG: + mgmt_ie_buf = saved_ie->probe_req_ie; + mgmt_ie_len = &saved_ie->probe_req_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->probe_req_ie); + break; + case BRCMF_VNDR_IE_PRBRSP_FLAG: + mgmt_ie_buf = saved_ie->probe_res_ie; + mgmt_ie_len = &saved_ie->probe_res_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie); + break; + case BRCMF_VNDR_IE_BEACON_FLAG: + mgmt_ie_buf = saved_ie->beacon_ie; + mgmt_ie_len = &saved_ie->beacon_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie); + break; + case BRCMF_VNDR_IE_ASSOCREQ_FLAG: + mgmt_ie_buf = 
saved_ie->assoc_req_ie; + mgmt_ie_len = &saved_ie->assoc_req_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie); + break; + default: err = -EPERM; brcmf_err("not suitable type\n"); goto exit; @@ -3484,6 +3623,49 @@ exit: return err; } +s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif) +{ + s32 pktflags[] = { + BRCMF_VNDR_IE_PRBREQ_FLAG, + BRCMF_VNDR_IE_PRBRSP_FLAG, + BRCMF_VNDR_IE_BEACON_FLAG + }; + int i; + + for (i = 0; i < ARRAY_SIZE(pktflags); i++) + brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0); + + memset(&vif->saved_ie, 0, sizeof(vif->saved_ie)); + return 0; +} + +static s32 +brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif, + struct cfg80211_beacon_data *beacon) +{ + s32 err; + + /* Set Beacon IEs to FW */ + err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG, + beacon->tail, beacon->tail_len); + if (err) { + brcmf_err("Set Beacon IE Failed\n"); + return err; + } + brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n"); + + /* Set Probe Response IEs to FW */ + err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBRSP_FLAG, + beacon->proberesp_ies, + beacon->proberesp_ies_len); + if (err) + brcmf_err("Set Probe Resp IE Failed\n"); + else + brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n"); + + return err; +} + static s32 brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *settings) @@ -3496,7 +3678,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_tlv *rsn_ie; struct brcmf_vs_tlv *wpa_ie; struct brcmf_join_params join_params; - s32 bssidx = 0; + enum nl80211_iftype dev_role; + struct brcmf_fil_bss_enable_le bss_enable; brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n", cfg80211_get_chandef_type(&settings->chandef), @@ -3506,10 +3689,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, settings->ssid, settings->ssid_len, settings->auth_type, settings->inactivity_timeout); - if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) { - brcmf_err("Not in AP creation mode\n"); - return -EPERM; - } + dev_role = ifp->vif->wdev.iftype; memset(&ssid_le, 0, sizeof(ssid_le)); if (settings->ssid == NULL || settings->ssid_len == 0) { @@ -3530,21 +3710,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } brcmf_set_mpc(ndev, 0); - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); - if (err < 0) { - brcmf_err("BRCMF_C_DOWN error %d\n", err); - goto exit; - } - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1); - if (err < 0) { - brcmf_err("SET INFRA error %d\n", err); - goto exit; - } - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1); - if (err < 0) { - brcmf_err("setting AP mode failed %d\n", err); - goto exit; - } /* find the RSN_IE */ rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, @@ -3570,27 +3735,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } } else { brcmf_dbg(TRACE, "No WPA(2) IEs found\n"); - brcmf_configure_opensecurity(ndev, bssidx); + brcmf_configure_opensecurity(ifp); } - /* Set Beacon IEs to FW */ - err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev), - VNDR_IE_BEACON_FLAG, - settings->beacon.tail, - settings->beacon.tail_len); - if (err) - brcmf_err("Set Beacon IE Failed\n"); - else - brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n"); - /* Set Probe Response IEs to FW */ - err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev), - VNDR_IE_PRBRSP_FLAG, - settings->beacon.proberesp_ies, - settings->beacon.proberesp_ies_len); - if (err) - brcmf_err("Set Probe Resp 
IE Failed\n"); - else - brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n"); + brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); if (settings->beacon_interval) { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, @@ -3608,22 +3756,62 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, goto exit; } } - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); - if (err < 0) { - brcmf_err("BRCMF_C_UP error (%d)\n", err); - goto exit; + + if (dev_role == NL80211_IFTYPE_AP) { + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); + if (err < 0) { + brcmf_err("BRCMF_C_DOWN error %d\n", err); + goto exit; + } + brcmf_fil_iovar_int_set(ifp, "apsta", 0); } - memset(&join_params, 0, sizeof(join_params)); - /* join parameters starts with ssid */ - memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le)); - /* create softap */ - err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, - &join_params, sizeof(join_params)); + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1); if (err < 0) { - brcmf_err("SET SSID error (%d)\n", err); + brcmf_err("SET INFRA error %d\n", err); goto exit; } + if (dev_role == NL80211_IFTYPE_AP) { + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1); + if (err < 0) { + brcmf_err("setting AP mode failed %d\n", err); + goto exit; + } + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); + if (err < 0) { + brcmf_err("BRCMF_C_UP error (%d)\n", err); + goto exit; + } + + memset(&join_params, 0, sizeof(join_params)); + /* join parameters starts with ssid */ + memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le)); + /* create softap */ + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, + &join_params, sizeof(join_params)); + if (err < 0) { + brcmf_err("SET SSID error (%d)\n", err); + goto exit; + } + brcmf_dbg(TRACE, "AP mode configuration complete\n"); + } else { + err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le, + sizeof(ssid_le)); + if (err < 0) { + brcmf_err("setting ssid failed %d\n", err); + goto exit; + } + bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); + bss_enable.enable = cpu_to_le32(1); + err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable, + sizeof(bss_enable)); + if (err < 0) { + brcmf_err("bss_enable config failed %d\n", err); + goto exit; + } + + brcmf_dbg(TRACE, "GO mode configuration complete\n"); + } clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); @@ -3637,10 +3825,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); s32 err = -EPERM; + struct brcmf_fil_bss_enable_le bss_enable; brcmf_dbg(TRACE, "Enter\n"); - if (ifp->vif->mode == WL_MODE_AP) { + if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) { /* Due to most likely deauths outstanding we sleep */ /* first to make sure they get processed by fw. 
*/ msleep(400); @@ -3654,18 +3843,41 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) brcmf_err("BRCMF_C_UP error %d\n", err); goto exit; } - brcmf_set_mpc(ndev, 1); - clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); - clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); + } else { + bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); + bss_enable.enable = cpu_to_le32(0); + err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable, + sizeof(bss_enable)); + if (err < 0) + brcmf_err("bss_enable config failed %d\n", err); } + brcmf_set_mpc(ndev, 1); + set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); + clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); + exit: return err; } +static s32 +brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_beacon_data *info) +{ + struct brcmf_if *ifp = netdev_priv(ndev); + s32 err; + + brcmf_dbg(TRACE, "Enter\n"); + + err = brcmf_config_ap_mgmt_ie(ifp->vif, info); + + return err; +} + static int brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, u8 *mac) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_scb_val_le scbval; struct brcmf_if *ifp = netdev_priv(ndev); s32 err; @@ -3675,6 +3887,8 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, brcmf_dbg(TRACE, "Enter %pM\n", mac); + if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) + ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; if (!check_vif_up(ifp->vif)) return -EIO; @@ -3689,7 +3903,147 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, return err; } + +static void +brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy, + struct wireless_dev *wdev, + u16 frame_type, bool reg) +{ + struct brcmf_if *ifp = netdev_priv(wdev->netdev); + struct brcmf_cfg80211_vif *vif = ifp->vif; + u16 mgmt_type; + + brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg); + + mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4; + if (reg) + vif->mgmt_rx_reg |= BIT(mgmt_type); + else + vif->mgmt_rx_reg &= ~BIT(mgmt_type); +} + + +static int +brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct ieee80211_channel *chan, bool offchan, + unsigned int wait, const u8 *buf, size_t len, + bool no_cck, bool dont_wait_for_ack, u64 *cookie) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + const struct ieee80211_mgmt *mgmt; + struct brcmf_if *ifp; + struct brcmf_cfg80211_vif *vif; + s32 err = 0; + s32 ie_offset; + s32 ie_len; + struct brcmf_fil_action_frame_le *action_frame; + struct brcmf_fil_af_params_le *af_params; + bool ack; + s32 chan_nr; + + brcmf_dbg(TRACE, "Enter\n"); + + *cookie = 0; + + mgmt = (const struct ieee80211_mgmt *)buf; + + if (!ieee80211_is_mgmt(mgmt->frame_control)) { + brcmf_err("Driver only allows MGMT packet type\n"); + return -EPERM; + } + + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + /* Right now the only reason to get a probe response */ + /* is for p2p listen response or for p2p GO from */ + /* wpa_supplicant. Unfortunately the probe is sent */ + /* on primary ndev, while dongle wants it on the p2p */ + /* vif. Since this is the only reason for a probe */ + /* response to be sent, the vif is taken from cfg. */ + /* If ever desired to send proberesp for non p2p */ + /* response then data should be checked for */ + /* "DIRECT-".
Note in future supplicant will take */ + /* dedicated p2p wdev to do this and then this 'hack'*/ + /* is not needed anymore. */ + ie_offset = DOT11_MGMT_HDR_LEN + + DOT11_BCN_PRB_FIXED_LEN; + ie_len = len - ie_offset; + ifp = netdev_priv(wdev->netdev); + vif = ifp->vif; + if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) + vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + err = brcmf_vif_set_mgmt_ie(vif, + BRCMF_VNDR_IE_PRBRSP_FLAG, + &buf[ie_offset], + ie_len); + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, + GFP_KERNEL); + } else if (ieee80211_is_action(mgmt->frame_control)) { + af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); + if (af_params == NULL) { + brcmf_err("unable to allocate frame\n"); + err = -ENOMEM; + goto exit; + } + action_frame = &af_params->action_frame; + /* Add the packet Id */ + action_frame->packet_id = cpu_to_le32(*cookie); + /* Add BSSID */ + memcpy(&action_frame->da[0], &mgmt->da[0], ETH_ALEN); + memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); + /* Add the length, excluding the 802.11 header */ + action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); + /* Add the channel */ + chan_nr = ieee80211_frequency_to_channel(chan->center_freq); + af_params->channel = cpu_to_le32(chan_nr); + + memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], + le16_to_cpu(action_frame->len)); + + brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n", + *cookie, le16_to_cpu(action_frame->len), + chan->center_freq); + + ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev, + af_params); + + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, + GFP_KERNEL); + kfree(af_params); + } else { + brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control); + brcmf_dbg_hex_dump(true, buf, len, "payload, len=%Zu\n", len); + } + +exit: + return err; +} + + +static int +brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + u64 cookie) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_cfg80211_vif *vif; + int err = 0; + + brcmf_dbg(TRACE, "Enter p2p listen cancel\n"); + + vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + if (vif == NULL) { + brcmf_err("No p2p device available for probe response\n"); + err = -ENODEV; + goto exit; + } + brcmf_p2p_cancel_remain_on_channel(vif->ifp); +exit: + return err; +} + static struct cfg80211_ops wl_cfg80211_ops = { + .add_virtual_intf = brcmf_cfg80211_add_iface, + .del_virtual_intf = brcmf_cfg80211_del_iface, .change_virtual_intf = brcmf_cfg80211_change_iface, .scan = brcmf_cfg80211_scan, .set_wiphy_params = brcmf_cfg80211_set_wiphy_params, @@ -3704,7 +4058,6 @@ static struct cfg80211_ops wl_cfg80211_ops = { .set_default_key = brcmf_cfg80211_config_default_key, .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key, .set_power_mgmt = brcmf_cfg80211_set_power_mgmt, - .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask, .connect = brcmf_cfg80211_connect, .disconnect = brcmf_cfg80211_disconnect, .suspend = brcmf_cfg80211_suspend, @@ -3714,28 +4067,43 @@ static struct cfg80211_ops wl_cfg80211_ops = { .flush_pmksa = brcmf_cfg80211_flush_pmksa, .start_ap = brcmf_cfg80211_start_ap, .stop_ap = brcmf_cfg80211_stop_ap, + .change_beacon = brcmf_cfg80211_change_beacon, .del_station = brcmf_cfg80211_del_station, .sched_scan_start = brcmf_cfg80211_sched_scan_start, .sched_scan_stop = brcmf_cfg80211_sched_scan_stop, + .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register, + .mgmt_tx = brcmf_cfg80211_mgmt_tx, + .remain_on_channel =
brcmf_p2p_remain_on_channel, + .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, #ifdef CONFIG_NL80211_TESTMODE .testmode_cmd = brcmf_cfg80211_testmode #endif }; -static s32 brcmf_mode_to_nl80211_iftype(s32 mode) +static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type) { - s32 err = 0; - - switch (mode) { - case WL_MODE_BSS: - return NL80211_IFTYPE_STATION; - case WL_MODE_IBSS: - return NL80211_IFTYPE_ADHOC; + switch (type) { + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_MESH_POINT: + return -ENOTSUPP; + case NL80211_IFTYPE_ADHOC: + return WL_MODE_IBSS; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + return WL_MODE_BSS; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + return WL_MODE_AP; + case NL80211_IFTYPE_P2P_DEVICE: + return WL_MODE_P2P; + case NL80211_IFTYPE_UNSPECIFIED: default: - return NL80211_IFTYPE_UNSPECIFIED; + break; } - return err; + return -EINVAL; } static void brcmf_wiphy_pno_params(struct wiphy *wiphy) @@ -3747,6 +4115,56 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy) wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; } +static const struct ieee80211_iface_limit brcmf_iface_limits[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) + }, +}; +static const struct ieee80211_iface_combination brcmf_iface_combos[] = { + { + .max_interfaces = BRCMF_IFACE_MAX_CNT, + .num_different_channels = 1, /* no multi-channel for now */ + .n_limits = ARRAY_SIZE(brcmf_iface_limits), + .limits = brcmf_iface_limits + } +}; + +static const struct ieee80211_txrx_stypes +brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_STATION] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + } +}; + static struct wiphy *brcmf_setup_wiphy(struct device *phydev) { struct wiphy *wiphy; @@ -3759,10 +4177,16 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) } set_wiphy_dev(wiphy, phydev); wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; + wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX; wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_AP); + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_DEVICE); + wiphy->iface_combinations = brcmf_iface_combos; + wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set * it as 11a by default. 
@@ -3774,10 +4198,11 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->cipher_suites = __wl_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); - wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power - * save mode - * by default - */ + wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT | + WIPHY_FLAG_OFFCHAN_TX | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + wiphy->mgmt_stypes = brcmf_txrx_stypes; + wiphy->max_remain_on_channel_duration = 5000; brcmf_wiphy_pno_params(wiphy); err = wiphy_register(wiphy); if (err < 0) { @@ -3788,31 +4213,25 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) return wiphy; } -static struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, - struct net_device *netdev, - s32 mode, bool pm_block) + enum nl80211_iftype type, + bool pm_block) { struct brcmf_cfg80211_vif *vif; if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT) return ERR_PTR(-ENOSPC); + brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n", + sizeof(*vif)); vif = kzalloc(sizeof(*vif), GFP_KERNEL); if (!vif) return ERR_PTR(-ENOMEM); vif->wdev.wiphy = cfg->wiphy; - vif->wdev.netdev = netdev; - vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode); + vif->wdev.iftype = type; - if (netdev) { - vif->ifp = netdev_priv(netdev); - netdev->ieee80211_ptr = &vif->wdev; - SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy)); - } - - vif->mode = mode; + vif->mode = brcmf_nl80211_iftype_to_mode(type); vif->pm_block = pm_block; vif->roam_off = -1; @@ -3823,7 +4242,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, return vif; } -static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif) +void brcmf_free_vif(struct brcmf_cfg80211_vif *vif) { struct brcmf_cfg80211_info *cfg; struct wiphy *wiphy; @@ -3897,9 +4316,9 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg) conn_info->resp_ie_len = 0; } -static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg) +static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, + struct brcmf_if *ifp) { - struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); struct brcmf_cfg80211_assoc_ielen_le *assoc_info; struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); u32 req_len; @@ -3975,9 +4394,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, brcmf_dbg(TRACE, "Enter\n"); - brcmf_get_assoc_ies(cfg); + brcmf_get_assoc_ies(cfg, ifp); memcpy(profile->bssid, e->addr, ETH_ALEN); - brcmf_update_bss_info(cfg); + brcmf_update_bss_info(cfg, ifp); buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); if (buf == NULL) { @@ -4032,9 +4451,11 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg, if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) { if (completed) { - brcmf_get_assoc_ies(cfg); + brcmf_get_assoc_ies(cfg, ifp); memcpy(profile->bssid, e->addr, ETH_ALEN); - brcmf_update_bss_info(cfg); + brcmf_update_bss_info(cfg, ifp); + set_bit(BRCMF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state); } cfg80211_connect_result(ndev, (u8 *)profile->bssid, @@ -4045,9 +4466,6 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg, completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT, GFP_KERNEL); - if (completed) - set_bit(BRCMF_VIF_STATUS_CONNECTED, - &ifp->vif->sme_state); brcmf_dbg(CONN, "Report connect result - connection %s\n", completed ? 
"succeeded" : "failed"); } @@ -4060,38 +4478,38 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { - s32 err = 0; + static int generation; u32 event = e->event_code; u32 reason = e->reason; - u32 len = e->datalen; - static int generation; - struct station_info sinfo; brcmf_dbg(CONN, "event %d, reason %d\n", event, reason); - memset(&sinfo, 0, sizeof(sinfo)); + if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS && + ndev != cfg_to_ndev(cfg)) { + brcmf_dbg(CONN, "AP mode link down\n"); + complete(&cfg->vif_disabled); + return 0; + } - sinfo.filled = 0; if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) && - reason == BRCMF_E_STATUS_SUCCESS) { + (reason == BRCMF_E_STATUS_SUCCESS)) { + memset(&sinfo, 0, sizeof(sinfo)); sinfo.filled = STATION_INFO_ASSOC_REQ_IES; if (!data) { brcmf_err("No IEs present in ASSOC/REASSOC_IND"); return -EINVAL; } sinfo.assoc_req_ies = data; - sinfo.assoc_req_ies_len = len; + sinfo.assoc_req_ies_len = e->datalen; generation++; sinfo.generation = generation; - cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC); + cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL); } else if ((event == BRCMF_E_DISASSOC_IND) || (event == BRCMF_E_DEAUTH_IND) || (event == BRCMF_E_DEAUTH)) { - generation++; - sinfo.generation = generation; - cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC); + cfg80211_del_sta(ndev, e->addr, GFP_KERNEL); } - return err; + return 0; } static s32 @@ -4128,6 +4546,8 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, } brcmf_link_down(ifp->vif); brcmf_init_prof(ndev_to_prof(ndev)); + if (ndev != cfg_to_ndev(cfg)) + complete(&cfg->vif_disabled); } else if (brcmf_is_nonetwork(cfg, e)) { if (brcmf_is_ibssmode(ifp->vif)) clear_bit(BRCMF_VIF_STATUS_CONNECTING, @@ -4176,6 +4596,57 @@ brcmf_notify_mic_status(struct brcmf_if *ifp, return 0; } +static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, void *data) +{ + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_if_event *ifevent = (struct brcmf_if_event *)data; + struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; + struct brcmf_cfg80211_vif *vif; + + brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfg %u\n", + ifevent->action, ifevent->flags, ifevent->ifidx, + ifevent->bssidx); + + mutex_lock(&event->vif_event_lock); + event->action = ifevent->action; + vif = event->vif; + + switch (ifevent->action) { + case BRCMF_E_IF_ADD: + /* waiting process may have timed out */ + if (!cfg->vif_event.vif) + return -EBADF; + + ifp->vif = vif; + vif->ifp = ifp; + vif->wdev.netdev = ifp->ndev; + ifp->ndev->ieee80211_ptr = &vif->wdev; + SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); + mutex_unlock(&event->vif_event_lock); + wake_up(&event->vif_wq); + return 0; + + case BRCMF_E_IF_DEL: + ifp->vif = NULL; + mutex_unlock(&event->vif_event_lock); + /* event may not be upon user request */ + if (brcmf_cfg80211_vif_event_armed(cfg)) + wake_up(&event->vif_wq); + return 0; + + case BRCMF_E_IF_CHANGE: + mutex_unlock(&event->vif_event_lock); + wake_up(&event->vif_wq); + return 0; + + default: + mutex_unlock(&event->vif_event_lock); + break; + } + return -EINVAL; +} + static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf) { conf->frag_threshold = (u32)-1; @@ -4207,6 +4678,18 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg) brcmf_notify_connect_status); brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND, 
brcmf_notify_sched_scan_results); + brcmf_fweh_register(cfg->pub, BRCMF_E_IF, + brcmf_notify_vif_event); + brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_PROBEREQ_MSG, + brcmf_p2p_notify_rx_mgmt_p2p_probereq); + brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_DISC_LISTEN_COMPLETE, + brcmf_p2p_notify_listen_complete); + brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_RX, + brcmf_p2p_notify_action_frame_rx); + brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_COMPLETE, + brcmf_p2p_notify_action_tx_complete); + brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE, + brcmf_p2p_notify_action_tx_complete); } static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg) @@ -4262,7 +4745,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg) mutex_init(&cfg->usr_sync); brcmf_init_escan(cfg); brcmf_init_conf(cfg->conf); - + init_completion(&cfg->vif_disabled); return err; } @@ -4273,6 +4756,12 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg) brcmf_deinit_priv_mem(cfg); } +static void init_vif_event(struct brcmf_cfg80211_vif_event *event) +{ + init_waitqueue_head(&event->vif_wq); + mutex_init(&event->vif_event_lock); +} + struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, struct device *busdev) { @@ -4296,25 +4785,41 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, cfg = wiphy_priv(wiphy); cfg->wiphy = wiphy; cfg->pub = drvr; + init_vif_event(&cfg->vif_event); INIT_LIST_HEAD(&cfg->vif_list); - vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false); + vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false); if (IS_ERR(vif)) { wiphy_free(wiphy); return NULL; } + vif->ifp = ifp; + vif->wdev.netdev = ndev; + ndev->ieee80211_ptr = &vif->wdev; + SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy)); + err = wl_init_priv(cfg); if (err) { brcmf_err("Failed to init iwm_priv (%d)\n", err); goto cfg80211_attach_out; } - ifp->vif = vif; + + err = brcmf_p2p_attach(cfg); + if (err) { + brcmf_err("P2P initilisation failed (%d)\n", err); + goto cfg80211_p2p_attach_out; + } + return cfg; +cfg80211_p2p_attach_out: + wl_deinit_priv(cfg); + cfg80211_attach_out: brcmf_free_vif(vif); + wiphy_free(wiphy); return NULL; } @@ -4330,9 +4835,8 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) } static s32 -brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout) +brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout) { - struct brcmf_if *ifp = netdev_priv(ndev); s32 err = 0; __le32 roamtrigger[2]; __le32 roam_delta[2]; @@ -4383,10 +4887,9 @@ dongle_rom_out: } static s32 -brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, +brcmf_dongle_scantime(struct brcmf_if *ifp, s32 scan_assoc_time, s32 scan_unassoc_time, s32 scan_passive_time) { - struct brcmf_if *ifp = netdev_priv(ndev); s32 err = 0; err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME, @@ -4456,6 +4959,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) { struct net_device *ndev; struct wireless_dev *wdev; + struct brcmf_if *ifp; s32 power_mode; s32 err = 0; @@ -4464,35 +4968,34 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) ndev = cfg_to_ndev(cfg); wdev = ndev->ieee80211_ptr; + ifp = netdev_priv(ndev); + + /* make sure RF is ready for work */ + brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); - brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME, - WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME); + brcmf_dongle_scantime(ifp, WL_SCAN_CHANNEL_TIME, + WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME); 
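
The BRCMF_E_IF handler registered just above (brcmf_notify_vif_event) records the firmware's add/del/change action under vif_event_lock, wires the new ifp/vif pair together on BRCMF_E_IF_ADD, and wakes vif_wq; the matching arm and wait helpers appear further down in this patch. A minimal sketch of that arm-and-wait shape, with hypothetical fw_event_* names standing in for the driver's brcmf_cfg80211_*_vif_event helpers:

#include <linux/mutex.h>
#include <linux/wait.h>

struct fw_event {
	wait_queue_head_t wq;	/* init with init_waitqueue_head() */
	struct mutex lock;	/* init with mutex_init(); guards action */
	u8 action;
};

/* Arm before asking the firmware to act, so the wakeup cannot be missed. */
static void fw_event_arm(struct fw_event *ev)
{
	mutex_lock(&ev->lock);
	ev->action = 0;
	mutex_unlock(&ev->lock);
}

/* Predicate used by the waiter: read the last action under the lock. */
static bool fw_event_seen(struct fw_event *ev, u8 action)
{
	u8 cur;

	mutex_lock(&ev->lock);
	cur = ev->action;
	mutex_unlock(&ev->lock);
	return cur == action;
}

/* Event handler side: record what happened and wake the waiter. */
static void fw_event_signal(struct fw_event *ev, u8 action)
{
	mutex_lock(&ev->lock);
	ev->action = action;
	mutex_unlock(&ev->lock);
	wake_up(&ev->wq);
}

/* Returns 0 if the action was not seen before the timeout (in jiffies). */
static long fw_event_wait(struct fw_event *ev, u8 action, unsigned long timeout)
{
	return wait_event_timeout(ev->wq, fw_event_seen(ev, action), timeout);
}

Taking a mutex inside the wait condition mirrors the driver's own vif_event_equals(); only a single byte is copied under the lock, so a spinlock would serve equally well.
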
power_mode = cfg->pwr_save ? PM_FAST : PM_OFF; - err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PM, - power_mode); + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, power_mode); if (err) goto default_conf_out; brcmf_dbg(INFO, "power save set to %s\n", (power_mode ? "enabled" : "disabled")); - err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1), - WL_BEACON_TIMEOUT); + err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT); if (err) goto default_conf_out; err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype, NULL, NULL); - if (err && err != -EINPROGRESS) + if (err) goto default_conf_out; err = brcmf_dongle_probecap(cfg); if (err) goto default_conf_out; - /* -EINPROGRESS: Call commit handler */ - -default_conf_out: - cfg->dongle_up = true; +default_conf_out: return err; @@ -4501,8 +5004,6 @@ default_conf_out: static s32 __brcmf_cfg80211_up(struct brcmf_if *ifp) { set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state); - if (ifp->idx) - return 0; return brcmf_config_dongle(ifp->drvr->config); } @@ -4557,3 +5058,57 @@ s32 brcmf_cfg80211_down(struct net_device *ndev) return err; } +u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state) +{ + struct brcmf_cfg80211_vif *vif; + bool result = 0; + + list_for_each_entry(vif, &cfg->vif_list, list) { + if (test_bit(state, &vif->sme_state)) + result++; + } + return result; +} + +static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event, + u8 action) +{ + u8 evt_action; + + mutex_lock(&event->vif_event_lock); + evt_action = event->action; + mutex_unlock(&event->vif_event_lock); + return evt_action == action; +} + +void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg, + struct brcmf_cfg80211_vif *vif) +{ + struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; + + mutex_lock(&event->vif_event_lock); + event->vif = vif; + event->action = 0; + mutex_unlock(&event->vif_event_lock); +} + +bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) +{ + struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; + bool armed; + + mutex_lock(&event->vif_event_lock); + armed = event->vif != NULL; + mutex_unlock(&event->vif_event_lock); + + return armed; +} +int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg, + u8 action, ulong timeout) +{ + struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; + + return wait_event_timeout(event->vif_wq, + vif_event_equals(event, action), timeout); +} + diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h index e4d9cc7a8e63..8b5d4989906c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h @@ -41,6 +41,38 @@ #define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */ #define IE_MAX_LEN 512 +/* IE TLV processing */ +#define TLV_LEN_OFF 1 /* length offset */ +#define TLV_HDR_LEN 2 /* header length */ +#define TLV_BODY_OFF 2 /* body offset */ +#define TLV_OUI_LEN 3 /* oui id length */ + +/* 802.11 Mgmt Packet flags */ +#define BRCMF_VNDR_IE_BEACON_FLAG 0x1 +#define BRCMF_VNDR_IE_PRBRSP_FLAG 0x2 +#define BRCMF_VNDR_IE_ASSOCRSP_FLAG 0x4 +#define BRCMF_VNDR_IE_AUTHRSP_FLAG 0x8 +#define BRCMF_VNDR_IE_PRBREQ_FLAG 0x10 +#define BRCMF_VNDR_IE_ASSOCREQ_FLAG 0x20 +/* vendor IE in IW advertisement protocol ID field */ +#define BRCMF_VNDR_IE_IWAPID_FLAG 0x40 +/* allow custom IE id */ +#define BRCMF_VNDR_IE_CUSTOM_FLAG 0x100 + +/* P2P Action Frames flags (spec ordered) 
*/ +#define BRCMF_VNDR_IE_GONREQ_FLAG 0x001000 +#define BRCMF_VNDR_IE_GONRSP_FLAG 0x002000 +#define BRCMF_VNDR_IE_GONCFM_FLAG 0x004000 +#define BRCMF_VNDR_IE_INVREQ_FLAG 0x008000 +#define BRCMF_VNDR_IE_INVRSP_FLAG 0x010000 +#define BRCMF_VNDR_IE_DISREQ_FLAG 0x020000 +#define BRCMF_VNDR_IE_DISRSP_FLAG 0x040000 +#define BRCMF_VNDR_IE_PRDREQ_FLAG 0x080000 +#define BRCMF_VNDR_IE_PRDRSP_FLAG 0x100000 + +#define BRCMF_VNDR_IE_P2PAF_SHIFT 12 + + /** * enum brcmf_scan_status - dongle scan status * @@ -52,11 +84,19 @@ enum brcmf_scan_status { BRCMF_SCAN_STATUS_ABORT, }; -/* wi-fi mode */ +/** + * enum wl_mode - driver mode of virtual interface. + * + * @WL_MODE_BSS: connects to BSS. + * @WL_MODE_IBSS: operate as ad-hoc. + * @WL_MODE_AP: operate as access-point. + * @WL_MODE_P2P: provide P2P discovery. + */ enum wl_mode { WL_MODE_BSS, WL_MODE_IBSS, - WL_MODE_AP + WL_MODE_AP, + WL_MODE_P2P }; /* dongle configuration */ @@ -108,6 +148,7 @@ struct brcmf_cfg80211_profile { * @BRCMF_VIF_STATUS_READY: ready for operation. * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress. * @BRCMF_VIF_STATUS_CONNECTED: connected/joined succesfully. + * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress. * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation. * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started. */ @@ -115,6 +156,7 @@ enum brcmf_vif_status { BRCMF_VIF_STATUS_READY, BRCMF_VIF_STATUS_CONNECTING, BRCMF_VIF_STATUS_CONNECTED, + BRCMF_VIF_STATUS_DISCONNECTING, BRCMF_VIF_STATUS_AP_CREATING, BRCMF_VIF_STATUS_AP_CREATED }; @@ -122,16 +164,22 @@ enum brcmf_vif_status { /** * struct vif_saved_ie - holds saved IEs for a virtual interface. * + * @probe_req_ie: IE info for probe request. * @probe_res_ie: IE info for probe response. * @beacon_ie: IE info for beacon frame. + * @probe_req_ie_len: IE info length for probe request. * @probe_res_ie_len: IE info length for probe response. * @beacon_ie_len: IE info length for beacon frame. */ struct vif_saved_ie { + u8 probe_req_ie[IE_MAX_LEN]; u8 probe_res_ie[IE_MAX_LEN]; u8 beacon_ie[IE_MAX_LEN]; + u8 assoc_req_ie[IE_MAX_LEN]; + u32 probe_req_ie_len; u32 probe_res_ie_len; u32 beacon_ie_len; + u32 assoc_req_ie_len; }; /** @@ -145,6 +193,7 @@ struct vif_saved_ie { * @sme_state: SME state using enum brcmf_vif_status bits. * @pm_block: power-management blocked. * @list: linked list. + * @mgmt_rx_reg: registered rx mgmt frame types. */ struct brcmf_cfg80211_vif { struct brcmf_if *ifp; @@ -156,6 +205,7 @@ struct brcmf_cfg80211_vif { bool pm_block; struct vif_saved_ie saved_ie; struct list_head list; + u16 mgmt_rx_reg; }; /* association inform */ @@ -189,6 +239,9 @@ struct escan_info { u8 escan_buf[WL_ESCAN_BUF_SIZE]; struct wiphy *wiphy; struct net_device *ndev; + s32 (*run)(struct brcmf_cfg80211_info *cfg, + struct net_device *ndev, + struct cfg80211_scan_request *request, u16 action); }; /** @@ -273,10 +326,27 @@ struct brcmf_pno_scanresults_le { }; /** + * struct brcmf_cfg80211_vif_event - virtual interface event information. + * + * @vif_wq: waitqueue awaiting interface event from firmware. + * @vif_event_lock: protects other members in this structure. + * @vif_complete: completion for net attach. + * @action: either add, change, or delete. + * @vif: virtual interface object related to the event. 
+ */ +struct brcmf_cfg80211_vif_event { + wait_queue_head_t vif_wq; + struct mutex vif_event_lock; + u8 action; + struct brcmf_cfg80211_vif *vif; +}; + +/** * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface * * @wiphy: wiphy object for cfg80211 interface. * @conf: dongle configuration. + * @p2p: peer-to-peer specific information. * @scan_request: cfg80211 scan request object. * @usr_sync: mainly for dongle up/down synchronization. * @bss_list: bss_list holding scanned ap information. @@ -304,10 +374,12 @@ struct brcmf_pno_scanresults_le { * @escan_ioctl_buf: dongle command buffer for escan commands. * @vif_list: linked list of vif instances. * @vif_cnt: number of vif instances. + * @vif_event: vif event signalling. */ struct brcmf_cfg80211_info { struct wiphy *wiphy; struct brcmf_cfg80211_conf *conf; + struct brcmf_p2p_info p2p; struct cfg80211_scan_request *scan_request; struct mutex usr_sync; struct brcmf_scan_results *bss_list; @@ -335,6 +407,21 @@ struct brcmf_cfg80211_info { u8 *escan_ioctl_buf; struct list_head vif_list; u8 vif_cnt; + struct brcmf_cfg80211_vif_event vif_event; + struct completion vif_disabled; +}; + +/** + * struct brcmf_tlv - tag_ID/length/value_buffer tuple. + * + * @id: tag identifier. + * @len: number of bytes in value buffer. + * @data: value buffer. + */ +struct brcmf_tlv { + u8 id; + u8 len; + u8 data[1]; }; static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg) @@ -389,4 +476,26 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); s32 brcmf_cfg80211_up(struct net_device *ndev); s32 brcmf_cfg80211_down(struct net_device *ndev); +struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, + enum nl80211_iftype type, + bool pm_block); +void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); + +s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, + const u8 *vndr_ie_buf, u32 vndr_ie_len); +s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif); +struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key); +u16 channel_to_chanspec(struct ieee80211_channel *ch); +u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state); +void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg, + struct brcmf_cfg80211_vif *vif); +bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg); +int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg, + u8 action, ulong timeout); +s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, + struct net_device *ndev, + bool aborted, bool fw_abort); +void brcmf_set_mpc(struct net_device *ndev, int mpc); +void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg); + #endif /* _wl_cfg80211_h_ */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index 1de94f30564f..1585cc5bf866 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -961,7 +961,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, /* if acked then clear bit and free packet */ if ((bindex < AMPDU_TX_BA_MAX_WSIZE) && isset(bitmap, bindex)) { - ini->tx_in_transit--; ini->txretry[index] = 0; /* @@ -990,7 +989,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, if (retry && (ini->txretry[index] < (int)retry_limit)) { int ret; ini->txretry[index]++; - ini->tx_in_transit--; ret = brcms_c_txfifo(wlc, queue, p); /* * We shouldn't be out of space in 
the DMA @@ -1000,7 +998,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, WARN_ONCE(ret, "queue %d out of txds\n", queue); } else { /* Retry timeout */ - ini->tx_in_transit--; ieee80211_tx_info_clear_status(tx_info); tx_info->status.ampdu_ack_len = 0; tx_info->status.ampdu_len = 1; @@ -1009,8 +1006,8 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, skb_pull(p, D11_PHY_HDR_LEN); skb_pull(p, D11_TXH_LEN); brcms_dbg_ht(wlc->hw->d11core, - "BA Timeout, seq %d, in_transit %d\n", - seq, ini->tx_in_transit); + "BA Timeout, seq %d\n", + seq); ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p); } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c index a90b72202ec5..10ee314c4229 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -183,8 +183,7 @@ static bool brcms_c_country_valid(const char *ccode) * chars. */ if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A && - (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A && - ccode[2] == '\0')) + (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A)) return false; /* @@ -670,7 +669,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy, struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; const struct ieee80211_reg_rule *rule; - int band, i, ret; + int band, i; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; @@ -685,9 +684,8 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy, continue; if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { - ret = freq_reg_info(wiphy, ch->center_freq, - 0, &rule); - if (ret) + rule = freq_reg_info(wiphy, ch->center_freq); + if (IS_ERR(rule)) continue; if (!(rule->flags & NL80211_RRF_NO_IBSS)) @@ -703,8 +701,8 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy, } } -static int brcms_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static void brcms_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct brcms_info *wl = hw->priv; @@ -745,8 +743,6 @@ static int brcms_reg_notifier(struct wiphy *wiphy, if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G) wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi, brcms_c_japan_ccode(request->alpha2)); - - return 0; } void brcms_c_regd_init(struct brcms_c_info *wlc) diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index e5fd20994bec..c6451c61407a 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -363,8 +363,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) return -EOPNOTSUPP; } + spin_lock_bh(&wl->lock); + memcpy(wl->pub->cur_etheraddr, vif->addr, sizeof(vif->addr)); wl->mute_tx = false; brcms_c_mute(wl->wlc, false); + spin_unlock_bh(&wl->lock); return 0; } @@ -540,9 +543,8 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ARP_FILTER) { /* Hardware ARP filter address list or state changed */ - brcms_err(core, "%s: arp filtering: enabled %s, count %d" - " (implement)\n", __func__, info->arp_filter_enabled ? 
- "true" : "false", info->arp_addr_cnt); + brcms_err(core, "%s: arp filtering: %d addresses" + " (implement)\n", __func__, info->arp_addr_cnt); } if (changed & BSS_CHANGED_QOS) { @@ -669,7 +671,9 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: spin_lock_bh(&wl->lock); brcms_c_ampdu_flush(wl->wlc, sta, tid); spin_unlock_bh(&wl->lock); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 8b5839008af3..8ef02dca8f8c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -101,8 +101,6 @@ #define DOT11_RTS_LEN 16 #define DOT11_CTS_LEN 10 #define DOT11_BA_BITMAP_LEN 128 -#define DOT11_MIN_BEACON_PERIOD 1 -#define DOT11_MAX_BEACON_PERIOD 0xFFFF #define DOT11_MAXNUMFRAGS 16 #define DOT11_MAX_FRAG_LEN 2346 @@ -2466,6 +2464,7 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw, static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx) { static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; + u8 *ethaddr = wlc_hw->wlc->pub->cur_etheraddr; if (mute_tx) { /* suspend tx fifos */ @@ -2475,8 +2474,7 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx) brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO); /* zero the address match register so we do not send ACKs */ - brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, - null_ether_addr); + brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, null_ether_addr); } else { /* resume tx fifos */ brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO); @@ -2485,8 +2483,7 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx) brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO); /* Restore address */ - brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, - wlc_hw->etheraddr); + brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, ethaddr); } wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0); @@ -3141,8 +3138,7 @@ void brcms_c_reset(struct brcms_c_info *wlc) brcms_c_statsupd(wlc); /* reset our snapshot of macstat counters */ - memset((char *)wlc->core->macstat_snapshot, 0, - sizeof(struct macstat)); + memset(wlc->core->macstat_snapshot, 0, sizeof(struct macstat)); brcms_b_reset(wlc->hw); } @@ -4055,7 +4051,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci, return; } - memset((char *)&acp_shm, 0, sizeof(struct shm_acparams)); + memset(&acp_shm, 0, sizeof(struct shm_acparams)); /* fill in shm ac params struct */ acp_shm.txop = params->txop; /* convert from units of 32us to us for ucode */ @@ -4771,7 +4767,7 @@ static void brcms_c_bss_default_init(struct brcms_c_info *wlc) struct brcms_bss_info *bi = wlc->default_bss; /* init default and target BSS with some sane initial values */ - memset((char *)(bi), 0, sizeof(struct brcms_bss_info)); + memset(bi, 0, sizeof(*bi)); bi->beacon_period = BEACON_INTERVAL_DEFAULT; /* fill the default channel as the first valid channel @@ -5300,7 +5296,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config) brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode); /* Clear rateset override */ - memset(&rs, 0, sizeof(struct brcms_c_rateset)); + memset(&rs, 0, sizeof(rs)); switch (gmode) { case GMODE_LEGACY_B: @@ -5523,7 +5519,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs) if (rs->count > BRCMS_NUMRATES) return -ENOBUFS; - 
memset(&internal_rs, 0, sizeof(struct brcms_c_rateset)); + memset(&internal_rs, 0, sizeof(internal_rs)); /* Copy only legacy rateset section */ internal_rs.count = rs->count; @@ -5549,8 +5545,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs) int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period) { - if (period < DOT11_MIN_BEACON_PERIOD || - period > DOT11_MAX_BEACON_PERIOD) + if (period == 0) return -EINVAL; wlc->default_bss->beacon_period = period; @@ -5627,7 +5622,7 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name, for (i = 0; i < BRCMS_MAXMODULES; i++) { if (!strcmp(wlc->modulecb[i].name, name) && (wlc->modulecb[i].hdl == hdl)) { - memset(&wlc->modulecb[i], 0, sizeof(struct modulecb)); + memset(&wlc->modulecb[i], 0, sizeof(wlc->modulecb[i])); return 0; } } @@ -6447,10 +6442,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw, if ((txrate[k]->flags & IEEE80211_TX_RC_MCS) && (!is_mcs_rate(rspec[k]))) { - brcms_err(wlc->hw->d11core, - "wl%d: %s: IEEE80211_TX_" - "RC_MCS != is_mcs_rate(rspec)\n", - wlc->pub->unit, __func__); + brcms_warn(wlc->hw->d11core, + "wl%d: %s: IEEE80211_TX_RC_MCS != is_mcs_rate(rspec)\n", + wlc->pub->unit, __func__); } if (is_mcs_rate(rspec[k])) { @@ -6683,11 +6677,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw, (struct ofdm_phy_hdr *) rts_plcp) : rts_plcp[0]) << 8; } else { - memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN); - memset((char *)&txh->rts_frame, 0, - sizeof(struct ieee80211_rts)); - memset((char *)txh->RTSPLCPFallback, 0, - sizeof(txh->RTSPLCPFallback)); + memset(txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN); + memset(&txh->rts_frame, 0, sizeof(struct ieee80211_rts)); + memset(txh->RTSPLCPFallback, 0, sizeof(txh->RTSPLCPFallback)); txh->RTSDurFallback = 0; } @@ -6842,21 +6834,19 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw, wlc->fragthresh[queue] = (u16) newfragthresh; } else { - brcms_err(wlc->hw->d11core, - "wl%d: %s txop invalid " - "for rate %d\n", - wlc->pub->unit, fifo_names[queue], - rspec2rate(rspec[0])); + brcms_warn(wlc->hw->d11core, + "wl%d: %s txop invalid for rate %d\n", + wlc->pub->unit, fifo_names[queue], + rspec2rate(rspec[0])); } if (dur > wlc->edcf_txop[ac]) - brcms_err(wlc->hw->d11core, - "wl%d: %s: %s txop " - "exceeded phylen %d/%d dur %d/%d\n", - wlc->pub->unit, __func__, - fifo_names[queue], - phylen, wlc->fragthresh[queue], - dur, wlc->edcf_txop[ac]); + brcms_warn(wlc->hw->d11core, + "wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n", + wlc->pub->unit, __func__, + fifo_names[queue], + phylen, wlc->fragthresh[queue], + dur, wlc->edcf_txop[ac]); } } @@ -7331,7 +7321,7 @@ brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type, *len = hdr_len + body_len; /* format PHY and MAC headers */ - memset((char *)buf, 0, hdr_len); + memset(buf, 0, hdr_len); plcp = (struct cck_phy_hdr *) buf; @@ -7402,9 +7392,13 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc, struct brcms_bss_cfg *cfg, bool suspend) { - u16 prb_resp[BCN_TMPL_LEN / 2]; + u16 *prb_resp; int len = BCN_TMPL_LEN; + prb_resp = kmalloc(BCN_TMPL_LEN, GFP_ATOMIC); + if (!prb_resp) + return; + /* * write the probe response to hardware, or save in * the config structure @@ -7438,6 +7432,8 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc, if (suspend) brcms_c_enable_mac(wlc); + + kfree(prb_resp); } void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend) @@ -7617,7 +7613,7 @@ 
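
The brcms_c_bss_update_probe_resp() hunk above swaps a BCN_TMPL_LEN-sized on-stack u16 array for kmalloc(..., GFP_ATOMIC), returning early if the allocation fails and freeing the buffer at the end, presumably because kernel stacks are only a few kilobytes. A minimal sketch of the pattern, with a hypothetical TMPL_LEN standing in for the real template size:

#include <linux/slab.h>
#include <linux/types.h>

#define TMPL_LEN 512	/* hypothetical stand-in for BCN_TMPL_LEN */

static void update_template(void)
{
	u16 *buf;

	/*
	 * GFP_ATOMIC mirrors the call site above, which may run in a
	 * context that cannot sleep; use GFP_KERNEL where sleeping is fine.
	 */
	buf = kmalloc(TMPL_LEN, GFP_ATOMIC);
	if (!buf)
		return;

	/* ... build the template in buf and program the hardware ... */

	kfree(buf);
}
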
brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound) uint n = 0; uint bound_limit = bound ? RXBND : -1; - bool morepending; + bool morepending = false; skb_queue_head_init(&recv_frames); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/scb.h b/drivers/net/wireless/brcm80211/brcmsmac/scb.h index 51c79c7239b7..3a3d73699f83 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/scb.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/scb.h @@ -36,7 +36,6 @@ /* structure to store per-tid state for the ampdu initiator */ struct scb_ampdu_tid_ini { - u8 tx_in_transit; /* number of pending mpdus in transit in driver */ u8 tid; /* initiator tid for easy lookup */ /* tx retry count; indexed by seq modulo */ u8 txretry[AMPDU_TX_BA_MAX_WSIZE]; diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index c6ea995750db..dd9a18f8dbca 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -376,7 +376,7 @@ int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac) entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL); if (entry == NULL) - return -1; + return -ENOMEM; memcpy(entry->addr, mac, ETH_ALEN); diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index b3ab7b7becae..cb066f62879d 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -4464,13 +4464,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv) return err; } - priv->tx_buffers = - kmalloc(TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet), - GFP_ATOMIC); + priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH, + sizeof(struct ipw2100_tx_packet), + GFP_ATOMIC); if (!priv->tx_buffers) { - printk(KERN_ERR DRV_NAME - ": %s: alloc failed form tx buffers.\n", - priv->net_dev->name); bd_queue_free(priv, &priv->tx_queue); return -ENOMEM; } diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 2c2d6db0536c..d96257b79a84 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -11320,7 +11320,6 @@ static int ipw_up(struct ipw_priv *priv) if (!(priv->config & CFG_CUSTOM_MAC)) eeprom_parse_mac(priv, priv->mac_addr); memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); - memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN); ipw_set_geo(priv); diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index 3726cd6fcd75..3630a41df50d 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c @@ -572,26 +572,11 @@ il3945_tx_skb(struct il_priv *il, il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id); /* Total # bytes to be transmitted */ - len = (u16) skb->len; - tx_cmd->len = cpu_to_le16(len); + tx_cmd->len = cpu_to_le16((u16) skb->len); - il_update_stats(il, true, fc, len); tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - - D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); - D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); - il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd)); - il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, - ieee80211_hdrlen(fc)); - /* * Use the first empty entry in this queue's command buffer array * to contain the Tx command and 
MAC header concatenated together @@ -610,14 +595,8 @@ il3945_tx_skb(struct il_priv *il, * within command buffer array. */ txcmd_phys = pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE); - /* we do not map meta data ... so we can safely access address to - * provide to unmap command*/ - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, len); - - /* Add buffer containing Tx command and MAC(!) header to TFD's - * first entry */ - il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0); + if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) + goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ @@ -626,10 +605,34 @@ il3945_tx_skb(struct il_priv *il, phys_addr = pci_map_single(il->pci_dev, skb->data + hdr_len, len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) + goto drop_unlock; + } + + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, len); + if (len) il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0, U32_PAD(len)); + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; } + il_update_stats(il, true, fc, skb->len); + + D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); + D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd)); + il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, + ieee80211_hdrlen(fc)); + /* Tell device the write idx *just past* this latest filled TFD */ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); il_txq_update_write_ptr(il, txq); @@ -1001,12 +1004,12 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) struct list_head *element; struct il_rx_buf *rxb; struct page *page; + dma_addr_t page_dma; unsigned long flags; gfp_t gfp_mask = priority; while (1) { spin_lock_irqsave(&rxq->lock, flags); - if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); return; @@ -1035,26 +1038,34 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) break; } + /* Get physical address of RB/SKB */ + page_dma = + pci_map_page(il->pci_dev, page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + + if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { + __free_pages(page, il->hw_params.rx_page_order); + break; + } + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); + pci_unmap_page(il->pci_dev, page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); __free_pages(page, il->hw_params.rx_page_order); return; } + element = rxq->rx_used.next; rxb = list_entry(element, struct il_rx_buf, list); list_del(element); - spin_unlock_irqrestore(&rxq->lock, flags); rxb->page = page; - /* Get physical address of RB/SKB */ - rxb->page_dma = - pci_map_page(il->pci_dev, page, 0, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - - spin_lock_irqsave(&rxq->lock, flags); - + rxb->page_dma = page_dma; list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; il->alloc_rxb_page++; @@ -1284,8 +1295,15 @@ il3945_rx_handle(struct il_priv *il) pci_map_page(il->pci_dev, rxb->page, 0, PAGE_SIZE << il->hw_params. 
rx_page_order, PCI_DMA_FROMDEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; + if (unlikely(pci_dma_mapping_error(il->pci_dev, + rxb->page_dma))) { + __il_free_pages(il, rxb->page); + rxb->page = NULL; + list_add_tail(&rxb->list, &rxq->rx_used); + } else { + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } } else list_add_tail(&rxb->list, &rxq->rx_used); @@ -3474,6 +3492,7 @@ struct ieee80211_ops il3945_mac_ops = { .sta_add = il3945_mac_sta_add, .sta_remove = il_mac_sta_remove, .tx_last_beacon = il_mac_tx_last_beacon, + .flush = il_mac_flush, }; static int @@ -3548,7 +3567,8 @@ il3945_setup_mac(struct il_priv *il) hw->vif_data_size = sizeof(struct il_vif_priv); /* Tell mac80211 our characteristics */ - hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT; + hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); @@ -3557,6 +3577,8 @@ il3945_setup_mac(struct il_priv *il) WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | WIPHY_FLAG_IBSS_RSN; + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; /* we create the 802.11 header and a zero-length SSID element */ hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2; diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index c3fbf6717564..7941eb3a0166 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -319,6 +319,7 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) struct list_head *element; struct il_rx_buf *rxb; struct page *page; + dma_addr_t page_dma; unsigned long flags; gfp_t gfp_mask = priority; @@ -356,33 +357,35 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) return; } + /* Get physical address of the RB */ + page_dma = + pci_map_page(il->pci_dev, page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { + __free_pages(page, il->hw_params.rx_page_order); + break; + } + spin_lock_irqsave(&rxq->lock, flags); if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); + pci_unmap_page(il->pci_dev, page_dma, + PAGE_SIZE << il->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); __free_pages(page, il->hw_params.rx_page_order); return; } + element = rxq->rx_used.next; rxb = list_entry(element, struct il_rx_buf, list); list_del(element); - spin_unlock_irqrestore(&rxq->lock, flags); - BUG_ON(rxb->page); - rxb->page = page; - /* Get physical address of the RB */ - rxb->page_dma = - pci_map_page(il->pci_dev, page, 0, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - spin_lock_irqsave(&rxq->lock, flags); + rxb->page = page; + rxb->page_dma = page_dma; list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; il->alloc_rxb_page++; @@ -725,6 +728,16 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) if (rate_n_flags & RATE_MCS_SGI_MSK) rx_status.flag |= RX_FLAG_SHORT_GI; + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { + /* We know which subframes of an A-MPDU belong + * together since we get a single PHY response + * from the firmware for all of them. 
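
The il3945/il4965 rx_allocate hunks above now map each freshly allocated receive page before re-taking rxq->lock, check pci_dma_mapping_error(), and free the page (or unmap it again if the free list emptied in the meantime) instead of handing an unchecked address to the hardware. A condensed sketch of the map-then-commit step, written against the generic DMA API rather than the older pci_* wrappers the driver uses; rx_page_map() and its parameters are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Map an RX page and only return it once the mapping is known good. */
static struct page *rx_page_map(struct device *dev, unsigned int order,
				dma_addr_t *dma)
{
	struct page *page = alloc_pages(GFP_ATOMIC, order);

	if (!page)
		return NULL;

	*dma = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		/* never hand an unmapped page to the device */
		__free_pages(page, order);
		return NULL;
	}
	return page;
}

The caller then takes the queue lock and either attaches the page to a free buffer or, if none is left, unmaps and frees it again, which is exactly the back-out path the hunks above add.
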
+ */ + + rx_status.flag |= RX_FLAG_AMPDU_DETAILS; + rx_status.ampdu_reference = il->_4965.ampdu_ref; + } + il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb, &rx_status); } @@ -736,6 +749,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb) { struct il_rx_pkt *pkt = rxb_addr(rxb); il->_4965.last_phy_res_valid = true; + il->_4965.ampdu_ref++; memcpy(&il->_4965.last_phy_res, pkt->u.raw, sizeof(struct il_rx_phy_res)); } @@ -1779,8 +1793,7 @@ il4965_tx_skb(struct il_priv *il, memcpy(tx_cmd->hdr, hdr, hdr_len); /* Total # bytes to be transmitted */ - len = (u16) skb->len; - tx_cmd->len = cpu_to_le16(len); + tx_cmd->len = cpu_to_le16((u16) skb->len); if (info->control.hw_key) il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id); @@ -1790,7 +1803,6 @@ il4965_tx_skb(struct il_priv *il, il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc); - il_update_stats(il, true, fc, len); /* * Use the first empty entry in this queue's command buffer array * to contain the Tx command and MAC header concatenated together @@ -1812,18 +1824,8 @@ il4965_tx_skb(struct il_priv *il, txcmd_phys = pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, PCI_DMA_BIDIRECTIONAL); - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, firstlen); - /* Add buffer containing Tx command and MAC(!) header to TFD's - * first entry */ - il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); - - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } + if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) + goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ @@ -1832,8 +1834,24 @@ il4965_tx_skb(struct il_priv *il, phys_addr = pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) + goto drop_unlock; + } + + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, firstlen); + if (secondlen) il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, 0); + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; } scratch_phys = @@ -1846,6 +1864,8 @@ il4965_tx_skb(struct il_priv *il, tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); + il_update_stats(il, true, fc, skb->len); + D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd)); @@ -4281,8 +4301,16 @@ il4965_rx_handle(struct il_priv *il) pci_map_page(il->pci_dev, rxb->page, 0, PAGE_SIZE << il->hw_params. 
rx_page_order, PCI_DMA_FROMDEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; + + if (unlikely(pci_dma_mapping_error(il->pci_dev, + rxb->page_dma))) { + __il_free_pages(il, rxb->page); + rxb->page = NULL; + list_add_tail(&rxb->list, &rxq->rx_used); + } else { + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } } else list_add_tail(&rxb->list, &rxq->rx_used); @@ -5711,9 +5739,9 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS; - + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; if (il->cfg->sku & IL_SKU_N) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | @@ -5968,7 +5996,9 @@ il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, D_HT("start Tx\n"); ret = il4965_tx_agg_start(il, vif, sta, tid, ssn); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: D_HT("stop Tx\n"); ret = il4965_tx_agg_stop(il, vif, sta, tid); if (test_bit(S_EXIT_PENDING, &il->status)) @@ -6306,6 +6336,7 @@ const struct ieee80211_ops il4965_mac_ops = { .sta_remove = il_mac_sta_remove, .channel_switch = il4965_mac_channel_switch, .tx_last_beacon = il_mac_tx_last_beacon, + .flush = il_mac_flush, }; static int @@ -6553,6 +6584,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) il4965_prepare_card_hw(il); if (!il->hw_ready) { IL_WARN("Failed, HW not ready\n"); + err = -EIO; goto out_iounmap; } @@ -6569,9 +6601,6 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free_eeprom; - if (err) - goto out_free_eeprom; - /* extract MAC Address */ il4965_eeprom_get_mac(il, il->addresses[0].addr); D_INFO("MAC address: %pM\n", il->addresses[0].addr); diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index f3b8e91aa3dc..e8324b5e5bfe 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -1183,8 +1183,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta, if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) return -1; - if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) == - WLAN_HT_CAP_SM_PS_STATIC) + if (sta->smps_mode == IEEE80211_SMPS_STATIC) return -1; /* Need both Tx chains/antennas to support MIMO */ diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c index 5db11714e047..91eb2d07fdb8 100644 --- a/drivers/net/wireless/iwlegacy/4965.c +++ b/drivers/net/wireless/iwlegacy/4965.c @@ -1748,7 +1748,6 @@ static void il4965_post_associate(struct il_priv *il) { struct ieee80211_vif *vif = il->vif; - struct ieee80211_conf *conf = NULL; int ret = 0; if (!vif || !il->is_open) @@ -1759,8 +1758,6 @@ il4965_post_associate(struct il_priv *il) il_scan_cancel_timeout(il, 200); - conf = &il->hw->conf; - il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; il_commit_rxon(il); diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h index 25dd7d28d022..3b6c99400892 100644 --- a/drivers/net/wireless/iwlegacy/commands.h +++ b/drivers/net/wireless/iwlegacy/commands.h @@ -1134,8 +1134,9 @@ 
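
The il3945_tx_skb()/il4965_tx_skb() hunks above reorder the TX path so that pci_map_single() is checked with pci_dma_mapping_error() first, and only then are the unmap cookie recorded and the address attached to the TFD; on failure the frame is dropped with nothing to undo. A small sketch of that ordering using the generic DMA API; tx_map_cmd(), struct meta and the commented-out attach step are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct meta {
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* where to unmap from later */
	DEFINE_DMA_UNMAP_LEN(len);
};

static int tx_map_cmd(struct device *dev, void *cmd, size_t buflen,
		      struct meta *meta)
{
	dma_addr_t phys = dma_map_single(dev, cmd, buflen, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, phys))
		return -ENOMEM;	/* drop the frame; nothing mapped yet */

	/* only record unmap state for an address that is actually mapped */
	dma_unmap_addr_set(meta, mapping, phys);
	dma_unmap_len_set(meta, len, buflen);
	/* attach_to_desc(phys, buflen);  hardware-specific, omitted */
	return 0;
}
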
struct il_wep_cmd { #define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) #define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) -#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 +#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70 #define RX_RES_PHY_FLAGS_ANTENNA_POS 4 +#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7) #define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) #define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 90b8970eadf0..e006ea831320 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -1830,32 +1830,30 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) { struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; __le32 sta_flags; - u8 mimo_ps_mode; if (!sta || !sta_ht_inf->ht_supported) goto done; - mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; D_ASSOC("spatial multiplexing power save mode: %s\n", - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" : - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" : + (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" : + (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" : "disabled"); sta_flags = il->stations[idx].sta.station_flags; sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); - switch (mimo_ps_mode) { - case WLAN_HT_CAP_SM_PS_STATIC: + switch (sta->smps_mode) { + case IEEE80211_SMPS_STATIC: sta_flags |= STA_FLG_MIMO_DIS_MSK; break; - case WLAN_HT_CAP_SM_PS_DYNAMIC: + case IEEE80211_SMPS_DYNAMIC: sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; break; - case WLAN_HT_CAP_SM_PS_DISABLED: + case IEEE80211_SMPS_OFF: break; default: - IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode); + IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode); break; } @@ -3162,18 +3160,23 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) idx, il->cmd_queue); } #endif - txq->need_update = 1; - - if (il->ops->txq_update_byte_cnt_tbl) - /* Set up entry in queue's byte count circular buffer */ - il->ops->txq_update_byte_cnt_tbl(il, txq, 0); phys_addr = pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, PCI_DMA_BIDIRECTIONAL); + if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) { + idx = -ENOMEM; + goto out; + } dma_unmap_addr_set(out_meta, mapping, phys_addr); dma_unmap_len_set(out_meta, len, fix_size); + txq->need_update = 1; + + if (il->ops->txq_update_byte_cnt_tbl) + /* Set up entry in queue's byte count circular buffer */ + il->ops->txq_update_byte_cnt_tbl(il, txq, 0); + il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1, U32_PAD(cmd->len)); @@ -3181,6 +3184,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); il_txq_update_write_ptr(il, txq); +out: spin_unlock_irqrestore(&il->hcmd_lock, flags); return idx; } @@ -4700,6 +4704,42 @@ out: } EXPORT_SYMBOL(il_mac_change_interface); +void +il_mac_flush(struct ieee80211_hw *hw, bool drop) +{ + struct il_priv *il = hw->priv; + unsigned long timeout = jiffies + msecs_to_jiffies(500); + int i; + + mutex_lock(&il->mutex); + D_MAC80211("enter\n"); + + if (il->txq == NULL) + goto out; + + for (i = 0; i < il->hw_params.max_txq_num; i++) { + struct il_queue *q; + + if (i == il->cmd_queue) + continue; + + q = &il->txq[i].q; + if (q->read_ptr == q->write_ptr) + continue; + + if (time_after(jiffies, timeout)) { + IL_ERR("Failed to flush queue %d\n", q->id); 
+ break; + } + + msleep(20); + } +out: + D_MAC80211("leave\n"); + mutex_unlock(&il->mutex); +} +EXPORT_SYMBOL(il_mac_flush); + /* * On every watchdog tick we check (latest) time stamp. If it does not * change during timeout period and queue is not empty we reset firmware. diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h index a9a569f432fb..96f2025d936e 100644 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/iwlegacy/common.h @@ -1356,6 +1356,7 @@ struct il_priv { struct { struct il_rx_phy_res last_phy_res; bool last_phy_res_valid; + u32 ampdu_ref; struct completion firmware_loading_complete; @@ -1723,6 +1724,7 @@ void il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype newtype, bool newp2p); +void il_mac_flush(struct ieee80211_hw *hw, bool drop); int il_alloc_txq_mem(struct il_priv *il); void il_free_txq_mem(struct il_priv *il); diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 5cf43236421e..ba319cba3f1e 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -43,8 +43,20 @@ config IWLWIFI module will be called iwlwifi. config IWLDVM - tristate "Intel Wireless WiFi" + tristate "Intel Wireless WiFi DVM Firmware support" depends on IWLWIFI + help + This is the driver supporting the DVM firmware which is + currently the only firmware available for existing devices. + +config IWLMVM + tristate "Intel Wireless WiFi MVM Firmware support" + depends on IWLWIFI + help + This is the driver supporting the MVM firmware which is + currently only available for 7000 series devices. + + Say yes if you have such a device. menu "Debugging Options" depends on IWLWIFI diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 170ec330d2a9..6c7800044a04 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -5,8 +5,10 @@ iwlwifi-objs += iwl-drv.o iwlwifi-objs += iwl-debug.o iwlwifi-objs += iwl-notif-wait.o iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o +iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o +iwlwifi-objs += pcie/7000.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o @@ -15,5 +17,6 @@ ccflags-y += -D__CHECK_ENDIAN__ -I$(src) obj-$(CONFIG_IWLDVM) += dvm/ +obj-$(CONFIG_IWLMVM) += mvm/ CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h index 33b3ad2e546b..41ec27cb6efe 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. 
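
il_mac_flush(), added above, drains the data TX queues by re-reading each queue's read/write pointers and sleeping 20 ms between checks, giving up after roughly 500 ms. The poll-with-deadline idiom in isolation; drain_tx_queue() and the queue_empty callback are hypothetical stand-ins for the driver's pointer comparison:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>

static void drain_tx_queue(int qid, bool (*queue_empty)(int qid))
{
	unsigned long deadline = jiffies + msecs_to_jiffies(500);

	while (!queue_empty(qid)) {
		if (time_after(jiffies, deadline)) {
			pr_err("failed to flush queue %d\n", qid);
			break;
		}
		/* sleeping is fine: il_mac_flush() holds a mutex, not a spinlock */
		msleep(20);
	}
}
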
* * Redistribution and use in source and binary forms, with or without @@ -338,7 +338,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap); + struct ieee80211_sta *sta); static inline int iwl_sta_id(struct ieee80211_sta *sta) { diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c index de54713b680c..6468de8634b0 100644 --- a/drivers/net/wireless/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/iwlwifi/dvm/calib.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h index 2349f393cc42..65e920cab2b7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/calib.h +++ b/drivers/net/wireless/iwlwifi/dvm/calib.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h index 71ab76b2b39d..84e2c0fcfef6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/iwlwifi/dvm/commands.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1403,6 +1403,7 @@ enum { #define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */ #define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */ +#define AGG_TX_TRY_POS 12 #define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ @@ -3695,7 +3696,7 @@ struct iwl_bt_uart_msg { u8 frame5; u8 frame6; u8 frame7; -} __attribute__((packed)); +} __packed; struct iwl_bt_coex_profile_notif { struct iwl_bt_uart_msg last_bt_uart_msg; @@ -3703,7 +3704,7 @@ struct iwl_bt_coex_profile_notif { u8 bt_traffic_load; /* 0 .. 3? 
*/ u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */ u8 reserved; -} __attribute__((packed)); +} __packed; #define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0 #define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1 @@ -3752,7 +3753,7 @@ enum bt_coex_prio_table_priorities { struct iwl_bt_coex_prio_table_cmd { u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX]; -} __attribute__((packed)); +} __packed; #define IWL_BT_COEX_ENV_CLOSE 0 #define IWL_BT_COEX_ENV_OPEN 1 @@ -3764,7 +3765,7 @@ struct iwl_bt_coex_prot_env_cmd { u8 action; /* 0 = closed, 1 = open */ u8 type; /* 0 .. 15 */ u8 reserved[2]; -} __attribute__((packed)); +} __packed; /* * REPLY_D3_CONFIG @@ -3897,6 +3898,24 @@ struct iwlagn_wowlan_kek_kck_material_cmd { __le64 replay_ctr; } __packed; +#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 + +/* + * REPLY_WOWLAN_GET_STATUS = 0xe5 + */ +struct iwlagn_wowlan_status { + __le64 replay_ctr; + __le32 rekey_status; + __le32 wakeup_reason; + u8 pattern_number; + u8 reserved1; + __le16 qos_seq_ctr[8]; + __le16 non_qos_seq_ctr; + __le16 reserved2; + union iwlagn_all_tsc_rsc tsc_rsc; + __le16 reserved3; +} __packed; + /* * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification) */ diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 5b9533eef54d..20806cae11b7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -157,7 +157,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, sram = priv->dbgfs_sram_offset & ~0x3; /* read the first u32 from sram */ - val = iwl_read_targ_mem(priv->trans, sram); + val = iwl_trans_read_mem32(priv->trans, sram); for (; len; len--) { /* put the address at the start of every line */ @@ -176,7 +176,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, if (++offset == 4) { sram += 4; offset = 0; - val = iwl_read_targ_mem(priv->trans, sram); + val = iwl_trans_read_mem32(priv->trans, sram); } /* put in extra spaces and split lines for human readability */ diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index 2653a891cc7e..71ea77576d22 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c index 8c72be3f37c1..15cca2ef9294 100644 --- a/drivers/net/wireless/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/iwlwifi/dvm/devices.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. 
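
struct iwlagn_wowlan_status, added above, follows the same convention as the rest of commands.h (and the structures just converted from __attribute__((packed)) to __packed): fixed-width little-endian fields plus __packed, so the in-memory layout matches what the firmware expects regardless of host endianness or compiler padding. A tiny sketch with a made-up command structure:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

/* Hypothetical host-to-firmware command: explicit width, endianness, packing. */
struct fw_example_cmd {
	__le32 flags;
	__le16 duration;
	u8 channel;
	u8 reserved;	/* keep the structure naturally sized */
} __packed;

static void fill_cmd(struct fw_example_cmd *cmd, u32 flags, u16 dur, u8 chan)
{
	cmd->flags = cpu_to_le32(flags);	/* convert on the host side */
	cmd->duration = cpu_to_le16(dur);
	cmd->channel = chan;
	cmd->reserved = 0;
}

Sparse flags any direct assignment of a CPU-order value to a __le field, which is why the fill helper goes through cpu_to_le32()/cpu_to_le16().
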
* * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c index bf479f709091..33c7e15d24f5 100644 --- a/drivers/net/wireless/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/iwlwifi/dvm/led.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -69,7 +69,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = { /* Set led register off */ void iwlagn_led_enable(struct iwl_priv *priv) { - iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON); + iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); } /* diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h index b02a853103d3..8749dcfe695f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/led.h +++ b/drivers/net/wireless/iwlwifi/dvm/led.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 6ff46605ad4f..86ea5f4c3939 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 3163e0f38c25..323e4a33fcac 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
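
The iwlagn_mac_resume() rework that follows registers a notification waiter for REPLY_WOWLAN_GET_STATUS before sending the command with CMD_ASYNC, then blocks until the RX path has copied the status out. A generic register-then-wait sketch built on a completion; struct status_waiter and both helpers are hypothetical and deliberately avoid the iwlwifi notification-wait API:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Filled in by the RX/notification path before completing (hypothetical). */
struct status_waiter {
	struct completion done;	/* init with init_completion() */
	bool valid;
	u32 status;
};

/* Called from the RX path when the expected reply arrives. */
static void waiter_complete(struct status_waiter *w, u32 status)
{
	w->status = status;
	w->valid = true;
	complete(&w->done);
}

/* Caller: arm the waiter *before* sending the asynchronous command. */
static int waiter_wait(struct status_waiter *w, unsigned long timeout_ms)
{
	if (!wait_for_completion_timeout(&w->done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;
	return w->valid ? 0 : -EIO;
}

Arming the waiter before issuing the command is the important part: if the reply races ahead of the wait, the completion has already been counted and nothing is lost.
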
@@ -145,14 +145,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_NEED_DTIM_PERIOD | + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_QUEUE_CONTROL | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | - IEEE80211_HW_WANT_MONITOR_VIF | - IEEE80211_HW_SCAN_WHILE_IDLE; + IEEE80211_HW_WANT_MONITOR_VIF; hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; @@ -206,7 +205,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, #ifdef CONFIG_PM_SLEEP if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && - priv->trans->ops->wowlan_suspend && + priv->trans->ops->d3_suspend && + priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | @@ -426,7 +426,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, if (ret) goto error; - iwl_trans_wowlan_suspend(priv->trans); + iwl_trans_d3_suspend(priv->trans); goto out; @@ -441,54 +441,154 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, return ret; } +struct iwl_resume_data { + struct iwl_priv *priv; + struct iwlagn_wowlan_status *cmd; + bool valid; +}; + +static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_resume_data *resume_data = data; + struct iwl_priv *priv = resume_data->priv; + u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + + if (len - 4 != sizeof(*resume_data->cmd)) { + IWL_ERR(priv, "rx wrong size data\n"); + return true; + } + memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd)); + resume_data->valid = true; + + return true; +} + static int iwlagn_mac_resume(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; struct ieee80211_vif *vif; - unsigned long flags; - u32 base, status = 0xffffffff; - int ret = -EIO; + u32 base; + int ret; + enum iwl_d3_status d3_status; + struct error_table_start { + /* cf. 
struct iwl_error_event_table */ + u32 valid; + u32 error_id; + } err_info; + struct iwl_notification_wait status_wait; + static const u8 status_cmd[] = { + REPLY_WOWLAN_GET_STATUS, + }; + struct iwlagn_wowlan_status status_data = {}; + struct iwl_resume_data resume_data = { + .priv = priv, + .cmd = &status_data, + .valid = false, + }; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; +#ifdef CONFIG_IWLWIFI_DEBUGFS + const struct fw_img *img; +#endif IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + /* we'll clear ctx->vif during iwlagn_prepare_restart() */ + vif = ctx->vif; + + ret = iwl_trans_d3_resume(priv->trans, &d3_status); + if (ret) + goto out_unlock; + + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(priv, "Device was reset during suspend\n"); + goto out_unlock; + } base = priv->device_pointers.error_event_table; - if (iwlagn_hw_valid_rtc_data_addr(base)) { - spin_lock_irqsave(&priv->trans->reg_lock, flags); - ret = iwl_grab_nic_access_silent(priv->trans); - if (likely(ret == 0)) { - iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base); - status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); - iwl_release_nic_access(priv->trans); + if (!iwlagn_hw_valid_rtc_data_addr(base)) { + IWL_WARN(priv, "Invalid error table during resume!\n"); + goto out_unlock; + } + + iwl_trans_read_mem_bytes(priv->trans, base, + &err_info, sizeof(err_info)); + + if (err_info.valid) { + IWL_INFO(priv, "error table is valid (%d, 0x%x)\n", + err_info.valid, err_info.error_id); + if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + wakeup.rfkill_release = true; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); } - spin_unlock_irqrestore(&priv->trans->reg_lock, flags); + goto out_unlock; + } #ifdef CONFIG_IWLWIFI_DEBUGFS - if (ret == 0) { - const struct fw_img *img; - - img = &(priv->fw->img[IWL_UCODE_WOWLAN]); - if (!priv->wowlan_sram) { - priv->wowlan_sram = - kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len, - GFP_KERNEL); - } + img = &priv->fw->img[IWL_UCODE_WOWLAN]; + if (!priv->wowlan_sram) + priv->wowlan_sram = + kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len, + GFP_KERNEL); + + if (priv->wowlan_sram) + iwl_trans_read_mem(priv->trans, 0x800000, + priv->wowlan_sram, + img->sec[IWL_UCODE_SECTION_DATA].len / 4); +#endif + + /* + * This is very strange. The GET_STATUS command is sent but the device + * doesn't reply properly, it seems it doesn't close the RBD so one is + * always left open ... As a result, we need to send another command + * and have to reset the driver afterwards. As we need to switch to + * runtime firmware again that'll happen. + */ - if (priv->wowlan_sram) - _iwl_read_targ_mem_dwords( - priv->trans, 0x800000, - priv->wowlan_sram, - img->sec[IWL_UCODE_SECTION_DATA].len / 4); + iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd, + ARRAY_SIZE(status_cmd), iwl_resume_status_fn, + &resume_data); + + iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL); + iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL); + /* an RBD is left open in the firmware now! 
*/ + + ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5); + if (ret) + goto out_unlock; + + if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) { + u32 reasons = le32_to_cpu(status_data.wakeup_reason); + struct cfg80211_wowlan_wakeup *wakeup_report; + + IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons); + + if (reasons) { + if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET) + wakeup.magic_pkt = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH) + wakeup.pattern_idx = status_data.pattern_number; + if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE)) + wakeup.disconnect = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL) + wakeup.gtk_rekey_failure = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ) + wakeup.eap_identity_req = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE) + wakeup.four_way_handshake = true; + wakeup_report = &wakeup; + } else { + wakeup_report = NULL; } -#endif - } - /* we'll clear ctx->vif during iwlagn_prepare_restart() */ - vif = ctx->vif; + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + } priv->wowlan = false; @@ -498,6 +598,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) iwl_connection_init_rx_config(priv, ctx); iwlagn_set_rxon_chain(priv, ctx); + out_unlock: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); @@ -520,9 +621,6 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, - ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - if (iwlagn_tx_skb(priv, control->sta, skb)) ieee80211_free_txskb(hw, skb); } @@ -679,7 +777,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: IWL_DEBUG_HT(priv, "stop Tx\n"); ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); if ((ret == 0) && (priv->agg_tids_count > 0)) { @@ -1154,6 +1254,7 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) } static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, enum ieee80211_rssi_event rssi_event) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index faa05932efae..b9e3517652d6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
@@ -353,11 +353,8 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); /* Make sure device is powered up for SRAM reads */ - spin_lock_irqsave(&priv->trans->reg_lock, reg_flags); - if (unlikely(!iwl_grab_nic_access(priv->trans))) { - spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags); + if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags)) return; - } /* Set starting address; reads will auto-increment */ iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr); @@ -388,8 +385,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, } } /* Allow device to power down */ - iwl_release_nic_access(priv->trans); - spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags); + iwl_trans_release_nic_access(priv->trans, &reg_flags); } static void iwl_continuous_event_trace(struct iwl_priv *priv) @@ -408,7 +404,8 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) base = priv->device_pointers.log_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { - iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read)); + iwl_trans_read_mem_bytes(priv->trans, base, + &read, sizeof(read)); capacity = read.capacity; mode = read.mode; num_wraps = read.wrap_counter; @@ -1627,7 +1624,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) } /*TODO: Update dbgfs with ISR error stats obtained below */ - iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table)); + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); @@ -1716,9 +1713,8 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, ptr = base + EVENT_START_OFFSET + (start_idx * event_size); /* Make sure device is powered up for SRAM reads */ - spin_lock_irqsave(&trans->reg_lock, reg_flags); - if (unlikely(!iwl_grab_nic_access(trans))) - goto out_unlock; + if (!iwl_trans_grab_nic_access(trans, false, &reg_flags)) + return pos; /* Set starting address; reads will auto-increment */ iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr); @@ -1756,9 +1752,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, } /* Allow device to power down */ - iwl_release_nic_access(trans); -out_unlock: - spin_unlock_irqrestore(&trans->reg_lock, reg_flags); + iwl_trans_release_nic_access(trans, &reg_flags); return pos; } @@ -1835,10 +1829,10 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, } /* event log header */ - capacity = iwl_read_targ_mem(trans, base); - mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32))); - num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32))); - next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32))); + capacity = iwl_trans_read_mem32(trans, base); + mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32))); + num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32))); + next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32))); if (capacity > logsize) { IWL_ERR(priv, "Log capacity %d is bogus, limit to %d " @@ -1990,13 +1984,13 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode) struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); /* SKU Control */ - iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | - CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP, - (CSR_HW_REV_STEP(priv->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) | - (CSR_HW_REV_DASH(priv->trans->hw_rev) << -
CSR_HW_IF_CONFIG_REG_POS_MAC_DASH)); + iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | + CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP, + (CSR_HW_REV_STEP(priv->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) | + (CSR_HW_REV_DASH(priv->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_DASH)); /* write radio config values to register */ if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) { @@ -2008,10 +2002,11 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode) priv->nvm_data->radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; - iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | - CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | - CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val); + iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | + CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | + CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, + reg_val); IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n", priv->nvm_data->radio_cfg_type, diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index 518cf3715809..bd69018d07a9 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h index a2cee7f04848..7b03e1342d47 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.h +++ b/drivers/net/wireless/iwlwifi/dvm/power.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index f3dd0da60d8a..abe304267261 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -411,8 +411,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, * BT traffic, as they would just be disrupted by BT. 
*/ if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) { - IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n", - priv->bt_traffic_load); + IWL_DEBUG_COEX(priv, + "BT traffic (%d), no aggregation allowed\n", + priv->bt_traffic_load); return ret; } @@ -1288,8 +1289,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv, if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) return -1; - if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) - == WLAN_HT_CAP_SM_PS_STATIC) + if (sta->smps_mode == IEEE80211_SMPS_STATIC) return -1; /* Need both Tx chains/antennas to support MIMO */ @@ -1304,7 +1304,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv, tbl->max_search = IWL_MAX_SEARCH; rate_mask = lq_sta->active_mimo2_rate; - if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) tbl->is_ht40 = 1; else tbl->is_ht40 = 0; @@ -1344,8 +1344,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv, if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) return -1; - if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) - == WLAN_HT_CAP_SM_PS_STATIC) + if (sta->smps_mode == IEEE80211_SMPS_STATIC) return -1; /* Need both Tx chains/antennas to support MIMO */ @@ -1360,7 +1359,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv, tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; rate_mask = lq_sta->active_mimo3_rate; - if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) tbl->is_ht40 = 1; else tbl->is_ht40 = 0; @@ -1409,7 +1408,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv, tbl->max_search = IWL_MAX_SEARCH; rate_mask = lq_sta->active_siso_rate; - if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) tbl->is_ht40 = 1; else tbl->is_ht40 = 0; diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h index ad3aea8f626a..5d83cab22d62 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/iwlwifi/dvm/rs.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c index cac4f37cc427..a4eed2055fdb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/iwlwifi/dvm/rx.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files.
@@ -790,7 +790,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - ieee80211_rx(priv->hw, skb); + ieee80211_rx_ni(priv->hw, skb); } static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 9a891e6e60e8..23be948cf162 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -1545,10 +1545,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid); } - if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC && - priv->beacon_ctx) { + if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) { if (iwlagn_update_beacon(priv, vif)) - IWL_ERR(priv, "Error sending IBSS beacon\n"); + IWL_ERR(priv, "Error updating beacon\n"); } mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c index 610ed2204e1f..3a4aa5239c45 100644 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/iwlwifi/dvm/scan.c @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index bdba9543c351..94ef33838bc6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -77,7 +77,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv, IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", sta_id); - spin_lock(&priv->sta_lock); + spin_lock_bh(&priv->sta_lock); switch (add_sta_resp->status) { case ADD_STA_SUCCESS_MSK: @@ -119,7 +119,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv, priv->stations[sta_id].sta.mode == STA_CONTROL_MODIFY_MSK ? 
"Modified" : "Added", addsta->sta.addr); - spin_unlock(&priv->sta_lock); + spin_unlock_bh(&priv->sta_lock); return ret; } @@ -173,7 +173,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap) + struct ieee80211_sta *sta) { if (!ctx->ht.enabled || !ctx->ht.is_40mhz) return false; @@ -183,20 +183,11 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, return false; #endif - /* - * Remainder of this function checks ht_cap, but if it's - * NULL then we can do HT40 (special case for RXON) - */ - if (!ht_cap) + /* special case for RXON */ + if (!sta) return true; - if (!ht_cap->ht_supported) - return false; - - if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) - return false; - - return true; + return sta->bandwidth >= IEEE80211_STA_RX_BW_40; } static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, @@ -205,7 +196,6 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, __le32 *flags, __le32 *mask) { struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; - u8 mimo_ps_mode; *mask = STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK | @@ -217,26 +207,24 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, if (!sta || !sta_ht_inf->ht_supported) return; - mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; - IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n", sta->addr, - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? + (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" : - (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? + (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" : "disabled"); - switch (mimo_ps_mode) { - case WLAN_HT_CAP_SM_PS_STATIC: + switch (sta->smps_mode) { + case IEEE80211_SMPS_STATIC: *flags |= STA_FLG_MIMO_DIS_MSK; break; - case WLAN_HT_CAP_SM_PS_DYNAMIC: + case IEEE80211_SMPS_DYNAMIC: *flags |= STA_FLG_RTS_MIMO_PROT_MSK; break; - case WLAN_HT_CAP_SM_PS_DISABLED: + case IEEE80211_SMPS_OFF: break; default: - IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); + IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode); break; } @@ -246,7 +234,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, *flags |= cpu_to_le32( (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); - if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) *flags |= STA_FLG_HT40_EN_MSK; } diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c index 57b918ce3b5f..dc6f965a123a 100644 --- a/drivers/net/wireless/iwlwifi/dvm/testmode.c +++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c index eb864433e59d..03f9bc01c0cc 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/iwlwifi/dvm/tt.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -185,10 +185,8 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data) priv->thermal_throttle.ct_kill_toggle = true; } iwl_read32(priv->trans, CSR_UCODE_DRV_GP1); - spin_lock_irqsave(&priv->trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(priv->trans))) - iwl_release_nic_access(priv->trans); - spin_unlock_irqrestore(&priv->trans->reg_lock, flags); + if (iwl_trans_grab_nic_access(priv->trans, false, &flags)) + iwl_trans_release_nic_access(priv->trans, &flags); /* Reschedule the ct_kill timer to occur in * CT_KILL_EXIT_DURATION seconds to ensure we get a @@ -473,8 +471,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force) set_bit(STATUS_CT_KILL, &priv->status); iwl_perform_ct_kill_task(priv, true); } else { - iwl_prepare_ct_kill_task(priv); tt->state = old_state; + iwl_prepare_ct_kill_task(priv); } } else if (old_state == IWL_TI_CT_KILL && tt->state != IWL_TI_CT_KILL) { diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h index 44c7c8f30a2d..9356c4b908ca 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/iwlwifi/dvm/tt.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 279796419ea0..6aec2df3bb27 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. 
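Several hunks in this series (the thermal-throttle code above, main.c earlier, and the iwl-io.c helpers later) replace the open-coded reg_lock plus iwl_grab_nic_access() sequence with the transport's paired iwl_trans_grab_nic_access()/iwl_trans_release_nic_access() calls, hiding the locking and device wake-up handshake behind the transport layer. The sketch below only illustrates the calling convention; the transport functions are stubbed with a hypothetical fake_trans type, and only the shape of the API (boolean grab, mandatory release, sentinel on failure) is taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_trans { bool awake; };

/* Stub: wake the device and take whatever lock the transport needs.
 * If this returns false the caller must not touch device registers
 * and must not call the release function. */
static bool trans_grab_nic_access(struct fake_trans *t, unsigned long *flags)
{
    *flags = 0;              /* stand-in for the saved IRQ flags */
    t->awake = true;
    return true;
}

static void trans_release_nic_access(struct fake_trans *t, unsigned long *flags)
{
    (void)flags;
    t->awake = false;        /* allow the device to sleep again */
}

static uint32_t read_reg(struct fake_trans *t, uint32_t reg)
{
    (void)reg;
    return t->awake ? 0x12345678u : 0xdeadbeefu;
}

int main(void)
{
    struct fake_trans trans = { .awake = false };
    unsigned long flags;
    uint32_t val = 0x5a5a5a5a;   /* failure sentinel, as in iwl_read_direct32() */

    if (trans_grab_nic_access(&trans, &flags)) {
        val = read_reg(&trans, 0x10);
        trans_release_nic_access(&trans, &flags);
    }
    printf("val=0x%08x\n", val);
    return 0;
}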
* * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -231,13 +231,11 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); if (info->flags & IEEE80211_TX_CTL_AMPDU) tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; - IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); break; case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); - IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); break; case WLAN_CIPHER_SUITE_WEP104: @@ -355,8 +353,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, } } - IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); - if (sta) sta_priv = (void *)sta->drv_priv; @@ -472,6 +468,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, WARN_ON_ONCE(is_agg && priv->queue_to_mac80211[txq_id] != info->hw_queue); + IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid, + txq_id, seq_number); + if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; @@ -541,9 +540,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, spin_lock_bh(&priv->sta_lock); tid_data = &priv->tid_data[sta_id][tid]; - txq_id = priv->tid_data[sta_id][tid].agg.txq_id; + txq_id = tid_data->agg.txq_id; - switch (priv->tid_data[sta_id][tid].agg.state) { + switch (tid_data->agg.state) { case IWL_EMPTYING_HW_QUEUE_ADDBA: /* * This can happen if the peer stops aggregation @@ -563,9 +562,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, case IWL_AGG_ON: break; default: - IWL_WARN(priv, "Stopping AGG while state not ON " - "or starting for %d on %d (%d)\n", sta_id, tid, - priv->tid_data[sta_id][tid].agg.state); + IWL_WARN(priv, + "Stopping AGG while state not ON or starting for %d on %d (%d)\n", + sta_id, tid, tid_data->agg.state); spin_unlock_bh(&priv->sta_lock); return 0; } @@ -578,12 +577,11 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, "stopping AGG on STA/TID %d/%d but hwq %d not used\n", sta_id, tid, txq_id); } else if (tid_data->agg.ssn != tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " - "next_recl = %d\n", + IWL_DEBUG_TX_QUEUES(priv, + "Can't proceed: ssn %d, next_recl = %d\n", tid_data->agg.ssn, tid_data->next_reclaimed); - priv->tid_data[sta_id][tid].agg.state = - IWL_EMPTYING_HW_QUEUE_DELBA; + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA; spin_unlock_bh(&priv->sta_lock); return 0; } @@ -591,8 +589,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", tid_data->agg.ssn); turn_off: - agg_state = priv->tid_data[sta_id][tid].agg.state; - priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; + agg_state = tid_data->agg.state; + tid_data->agg.state = IWL_AGG_OFF; spin_unlock_bh(&priv->sta_lock); @@ -910,6 +908,12 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status) } } +static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) +{ + return le32_to_cpup((__le32 *)&tx_resp->status + + tx_resp->frame_count) & MAX_SN; +} + static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, struct iwlagn_tx_resp *tx_resp) { @@ -944,9 +948,15 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, if (tx_resp->frame_count == 1) return; + IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n", + agg->txq_id, + 
le32_to_cpu(tx_resp->rate_n_flags), + iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count); + /* Construct bit-map of pending frames within Tx window */ for (i = 0; i < tx_resp->frame_count; i++) { u16 fstatus = le16_to_cpu(frame_status[i].status); + u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS; if (status & AGG_TX_STATUS_MSK) iwlagn_count_agg_tx_err_status(priv, fstatus); @@ -955,11 +965,13 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, AGG_TX_STATE_ABORT_MSK)) continue; - IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), " - "try-count (0x%08x)\n", - iwl_get_agg_tx_fail_reason(fstatus), - fstatus & AGG_TX_STATUS_MSK, - fstatus & AGG_TX_TRY_MSK); + if (status & AGG_TX_STATUS_MSK || retry_cnt > 1) + IWL_DEBUG_TX_REPLY(priv, + "%d: status %s (0x%04x), try-count (0x%01x)\n", + i, + iwl_get_agg_tx_fail_reason(fstatus), + fstatus & AGG_TX_STATUS_MSK, + retry_cnt); } } @@ -990,12 +1002,6 @@ const char *iwl_get_agg_tx_fail_reason(u16 status) } #endif /* CONFIG_IWLWIFI_DEBUG */ -static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) -{ - return le32_to_cpup((__le32 *)&tx_resp->status + - tx_resp->frame_count) & MAX_SN; -} - static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status) { status &= TX_STATUS_MSK; @@ -1125,10 +1131,16 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> IWLAGN_TX_RES_RA_POS; - spin_lock(&priv->sta_lock); + spin_lock_bh(&priv->sta_lock); - if (is_agg) + if (is_agg) { + WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT || + tid >= IWL_MAX_TID_COUNT); + if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id) + IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id, + priv->tid_data[sta_id][tid].agg.txq_id); iwl_rx_reply_tx_agg(priv, tx_resp); + } __skb_queue_head_init(&skbs); @@ -1215,22 +1227,41 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, freed++; } - WARN_ON(!is_agg && freed != 1); + if (tid != IWL_TID_NON_QOS) { + priv->tid_data[sta_id][tid].next_reclaimed = + next_reclaimed; + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", + next_reclaimed); + } + + if (!is_agg && freed != 1) + IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed); /* * An offchannel frame can be send only on the AUX queue, where * there is no aggregation (and reordering) so it only is single * skb is expected to be processed. 
*/ - WARN_ON(is_offchannel_skb && freed != 1); + if (is_offchannel_skb && freed != 1) + IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed); + + IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id, + iwl_get_tx_fail_reason(status), status); + + IWL_DEBUG_TX_REPLY(priv, + "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n", + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame, + SEQ_TO_INDEX(sequence), ssn, + le16_to_cpu(tx_resp->seq_ctl)); } iwl_check_abort_status(priv, tx_resp->frame_count, status); - spin_unlock(&priv->sta_lock); + spin_unlock_bh(&priv->sta_lock); while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_ni(priv->hw, skb); } if (is_offchannel_skb) @@ -1277,12 +1308,12 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, tid = ba_resp->tid; agg = &priv->tid_data[sta_id][tid].agg; - spin_lock(&priv->sta_lock); + spin_lock_bh(&priv->sta_lock); if (unlikely(!agg->wait_for_ba)) { if (unlikely(ba_resp->bitmap)) IWL_ERR(priv, "Received BA when not expected\n"); - spin_unlock(&priv->sta_lock); + spin_unlock_bh(&priv->sta_lock); return 0; } @@ -1296,7 +1327,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, IWL_DEBUG_TX_QUEUES(priv, "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n", scd_flow, sta_id, tid, agg->txq_id); - spin_unlock(&priv->sta_lock); + spin_unlock_bh(&priv->sta_lock); return 0; } @@ -1365,11 +1396,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, } } - spin_unlock(&priv->sta_lock); + spin_unlock_bh(&priv->sta_lock); while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_ni(priv->hw, skb); } return 0; diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index c6467e5554f5..736fe9bb140e 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -286,89 +286,6 @@ static int iwl_alive_notify(struct iwl_priv *priv) return iwl_send_calib_results(priv); } - -/** - * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, - * using sample data 100 bytes apart. If these sample points are good, - * it's a pretty good bet that everything between them is good, too. 
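The helper whose comment ends just above verified the loaded instruction image by comparing only one 32-bit word every 100 bytes against the host copy, on the theory that matching sample points make corruption in between unlikely; the hunks that follow delete the whole verification step. A minimal sketch of that sampling idea over two in-memory buffers (stand-ins for the host image and a readback of device SRAM, not the driver's register-based readback):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Compare 4 bytes every 'stride' bytes instead of the whole image. */
static int verify_sparse(const uint8_t *host, const uint8_t *dev,
                         size_t len, size_t stride)
{
    size_t i;

    for (i = 0; i + 4 <= len; i += stride) {
        uint32_t a, b;

        memcpy(&a, host + i, 4);
        memcpy(&b, dev + i, 4);
        if (a != b)
            return -1;   /* mismatch at sample point i */
    }
    return 0;
}

int main(void)
{
    uint8_t host[1024], dev[1024];

    memset(host, 0xab, sizeof(host));
    memcpy(dev, host, sizeof(dev));
    printf("sparse check: %s\n",
           verify_sparse(host, dev, sizeof(host), 100) ? "FAIL" : "ok");
    return 0;
}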
- */ -static int iwl_verify_sec_sparse(struct iwl_priv *priv, - const struct fw_desc *fw_desc) -{ - __le32 *image = (__le32 *)fw_desc->data; - u32 len = fw_desc->len; - u32 val; - u32 i; - - IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); - - for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IWL_DL_IO is set */ - iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR, - i + fw_desc->offset); - val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) - return -EIO; - } - - return 0; -} - -static void iwl_print_mismatch_sec(struct iwl_priv *priv, - const struct fw_desc *fw_desc) -{ - __le32 *image = (__le32 *)fw_desc->data; - u32 len = fw_desc->len; - u32 val; - u32 offs; - int errors = 0; - - IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); - - iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR, - fw_desc->offset); - - for (offs = 0; - offs < len && errors < 20; - offs += sizeof(u32), image++) { - /* read data comes through single port, auto-incr addr */ - val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - IWL_ERR(priv, "uCode INST section at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", - offs, val, le32_to_cpu(*image)); - errors++; - } - } -} - -/** - * iwl_verify_ucode - determine which instruction image is in SRAM, - * and verify its contents - */ -static int iwl_verify_ucode(struct iwl_priv *priv, - enum iwl_ucode_type ucode_type) -{ - const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type); - - if (!img) { - IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type); - return -EINVAL; - } - - if (!iwl_verify_sec_sparse(priv, &img->sec[IWL_UCODE_SECTION_INST])) { - IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n"); - return 0; - } - - IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n"); - - iwl_print_mismatch_sec(priv, &img->sec[IWL_UCODE_SECTION_INST]); - return -EIO; -} - struct iwl_alive_data { bool valid; u8 subtype; @@ -426,7 +343,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, alive_cmd, ARRAY_SIZE(alive_cmd), iwl_alive_fn, &alive_data); - ret = iwl_trans_start_fw(priv->trans, fw); + ret = iwl_trans_start_fw(priv->trans, fw, false); if (ret) { priv->cur_ucode = old_type; iwl_remove_notification(&priv->notif_wait, &alive_wait); @@ -450,18 +367,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return -EIO; } - /* - * This step takes a long time (60-80ms!!) and - * WoWLAN image should be loaded quickly, so - * skip it for WoWLAN. - */ if (ucode_type != IWL_UCODE_WOWLAN) { - ret = iwl_verify_ucode(priv, ucode_type); - if (ret) { - priv->cur_ucode = old_type; - return ret; - } - /* delay a bit to give rfkill time to run */ msleep(5); } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h index 7960a52f6ad4..e9975c54c276 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. 
All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 864219d2136a..743b48343358 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -83,6 +83,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_6030, IWL_DEVICE_FAMILY_6050, IWL_DEVICE_FAMILY_6150, + IWL_DEVICE_FAMILY_7000, }; /* diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 34a5287dfc2f..df3463a38704 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -381,8 +381,8 @@ /* LED */ #define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) -#define CSR_LED_REG_TRUN_ON (0x78) -#define CSR_LED_REG_TRUN_OFF (0x38) +#define CSR_LED_REG_TURN_ON (0x60) +#define CSR_LED_REG_TURN_OFF (0x20) /* ANA_PLL */ #define CSR50_ANA_PLL_CFG_VAL (0x00880300) diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 42b20b0e83bc..8cf5db7fb5c9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project. * @@ -116,6 +116,7 @@ do { \ #define IWL_DL_HCMD 0x00000004 #define IWL_DL_STATE 0x00000008 /* 0x000000F0 - 0x00000010 */ +#define IWL_DL_TE 0x00000020 #define IWL_DL_EEPROM 0x00000040 #define IWL_DL_RADIO 0x00000080 /* 0x00000F00 - 0x00000100 */ @@ -156,6 +157,7 @@ do { \ #define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) #define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) #define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a) #define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a) #define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) #define IWL_DEBUG_FW(p, f, a...) 
IWL_DEBUG(p, IWL_DL_FW, f, ## a) diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index 70191ddbd8f6..8f61c717f619 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index dc7e26b2f383..9a0f45ec9e01 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index d3549f493a17..6f228bb2b844 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -139,8 +139,10 @@ struct iwl_drv { #endif }; -#define DVM_OP_MODE 0 -#define MVM_OP_MODE 1 +enum { + DVM_OP_MODE = 0, + MVM_OP_MODE = 1, +}; /* Protects the table contents, i.e. 
the ops pointer & drv list */ static struct mutex iwlwifi_opmode_table_mtx; @@ -149,8 +151,8 @@ static struct iwlwifi_opmode_table { const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ struct list_head drv; /* list of devices using this op_mode */ } iwlwifi_opmode_table[] = { /* ops set when driver is initialized */ - { .name = "iwldvm", .ops = NULL }, - { .name = "iwlmvm", .ops = NULL }, + [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL }, + [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL }, }; /* @@ -268,7 +270,7 @@ struct fw_sec_parsing { */ struct iwl_tlv_calib_data { __le32 ucode_type; - __le64 calib; + struct iwl_tlv_calib_ctrl calib; } __packed; struct iwl_firmware_pieces { @@ -358,7 +360,11 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) ucode_type); return -EINVAL; } - drv->fw.default_calib[ucode_type] = le64_to_cpu(def_calib->calib); + drv->fw.default_calib[ucode_type].flow_trigger = + def_calib->calib.flow_trigger; + drv->fw.default_calib[ucode_type].event_trigger = + def_calib->calib.event_trigger; + return 0; } @@ -959,7 +965,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) release_firmware(ucode_raw); mutex_lock(&iwlwifi_opmode_table_mtx); - op = &iwlwifi_opmode_table[DVM_OP_MODE]; + if (fw->mvm_fw) + op = &iwlwifi_opmode_table[MVM_OP_MODE]; + else + op = &iwlwifi_opmode_table[DVM_OP_MODE]; /* add this device to the list of devices using this op_mode */ list_add_tail(&drv->list, &op->drv); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h index 285de5f68c05..594a5c71b272 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/iwlwifi/iwl-drv.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -66,7 +66,7 @@ /* for all modules */ #define DRV_NAME "iwlwifi" #define IWLWIFI_VERSION "in-tree:" -#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation" +#define DRV_COPYRIGHT "Copyright(c) 2003-2013 Intel Corporation" #define DRV_AUTHOR "<ilw@linux.intel.com>" diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 471986690cf0..034f2ff4f43d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. 
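The op-mode table hunk above turns the DVM/MVM indices into an enum and keys the table entries with designated initializers, so the array stays correct even if the constants are reordered, and the firmware-load callback can then index the table by whichever op mode the parsed image requires. A tiny standalone sketch of that idiom, using a hypothetical stand-in for the table entry type:

#include <stdio.h>

enum { DVM_OP_MODE = 0, MVM_OP_MODE = 1 };

/* Hypothetical stand-in for struct iwlwifi_opmode_table: keying the
 * initializers by enum value keeps the array in sync with the enum. */
static const struct { const char *name; } opmode_table[] = {
    [DVM_OP_MODE] = { .name = "iwldvm" },
    [MVM_OP_MODE] = { .name = "iwlmvm" },
};

int main(void)
{
    int mvm_fw = 1;   /* would come from the parsed firmware file */
    int idx = mvm_fw ? MVM_OP_MODE : DVM_OP_MODE;

    printf("selected op_mode: %s\n", opmode_table[idx].name);
    return 0;
}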
* * Redistribution and use in source and binary forms, with or without @@ -703,9 +703,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, return n_channels; } -static int iwl_init_sband_channels(struct iwl_nvm_data *data, - struct ieee80211_supported_band *sband, - int n_channels, enum ieee80211_band band) +int iwl_init_sband_channels(struct iwl_nvm_data *data, + struct ieee80211_supported_band *sband, + int n_channels, enum ieee80211_band band) { struct ieee80211_channel *chan = &data->channels[0]; int n = 0, idx = 0; @@ -728,10 +728,10 @@ static int iwl_init_sband_channels(struct iwl_nvm_data *data, #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ -static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band) +void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band) { int max_bit_rate = 0; u8 rx_chains; diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h index 555f0eb61d48..683fe6a8c58f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -126,4 +126,13 @@ static inline void iwl_free_nvm_data(struct iwl_nvm_data *data) int iwl_nvm_check_version(struct iwl_nvm_data *data, struct iwl_trans *trans); +int iwl_init_sband_channels(struct iwl_nvm_data *data, + struct ieee80211_supported_band *sband, + int n_channels, enum ieee80211_band band); + +void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band); + #endif /* __iwl_eeprom_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c index 27c7da3c6ed1..ef4806f27cf8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h index 1337c9d36fee..b2588c5cbf93 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index ec48563d3c6a..f5592fb3b1ed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -225,6 +225,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) #define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) +#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c) +#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG /** * Rx Config/Status Registers (RCSR) @@ -257,6 +259,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) #define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) +#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR (FH_MEM_RCSR_CHNL0 + 0x8) +#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (FH_MEM_RCSR_CHNL0 + 0x10) #define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ @@ -410,6 +414,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) * uCode/driver must write "1" in order to clear this flag */ #define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) +#define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008) #define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index e71564053e7f..90873eca35f7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index d1a86b66bc51..b545178e46e3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -139,6 +139,19 @@ struct fw_img { #define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) #define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) +/* + * Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers. + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers. + */ +struct iwl_tlv_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + /** * struct iwl_fw - variables associated with the firmware * @@ -153,11 +166,12 @@ struct fw_img { * @inst_evtlog_ptr: event log offset for runtime ucode. * @inst_evtlog_size: event log size for runtime ucode. * @inst_errlog_ptr: error log offfset for runtime ucode. + * @mvm_fw: indicates this is MVM firmware */ struct iwl_fw { u32 ucode_ver; - char fw_version[ETHTOOL_BUSINFO_LEN]; + char fw_version[ETHTOOL_FWVERS_LEN]; /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; @@ -168,7 +182,7 @@ struct iwl_fw { u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; - u64 default_calib[IWL_UCODE_TYPE_MAX]; + struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX]; u32 phy_config; bool mvm_fw; diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c index cdaff9572059..276410d82de4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/iwlwifi/iwl-io.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project. 
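The firmware-header change above replaces the single __le64 default calibration value with struct iwl_tlv_calib_ctrl, splitting it into separate flow_trigger and event_trigger bitmaps, and the loader now copies the two 32-bit fields out of the TLV payload instead of byte-swapping one 64-bit word. Below is a minimal userspace sketch of reading such a payload into the two little-endian bitmaps; the raw byte values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout added in iwl-fw.h: two 32-bit LE trigger bitmaps. */
struct tlv_calib_ctrl {
    uint32_t flow_trigger;    /* calibrations run as part of the flow */
    uint32_t event_trigger;   /* calibrations run on events */
};

static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* Illustrative TLV payload: ucode_type followed by the ctrl struct,
     * all little-endian, as in struct iwl_tlv_calib_data. */
    const uint8_t payload[12] = {
        0x01, 0x00, 0x00, 0x00,   /* ucode_type = 1 */
        0x0f, 0x00, 0x00, 0x00,   /* flow_trigger  = 0x0000000f */
        0x03, 0x00, 0x00, 0x00,   /* event_trigger = 0x00000003 */
    };
    struct tlv_calib_ctrl ctrl;

    ctrl.flow_trigger = get_le32(payload + 4);
    ctrl.event_trigger = get_le32(payload + 8);
    printf("flow=0x%08x event=0x%08x\n", ctrl.flow_trigger, ctrl.event_trigger);
    return 0;
}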
* @@ -35,54 +35,6 @@ #define IWL_POLL_INTERVAL 10 /* microseconds */ -static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) -{ - iwl_write32(trans, reg, iwl_read32(trans, reg) | mask); -} - -static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) -{ - iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask); -} - -void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) -{ - unsigned long flags; - - spin_lock_irqsave(&trans->reg_lock, flags); - __iwl_set_bit(trans, reg, mask); - spin_unlock_irqrestore(&trans->reg_lock, flags); -} -EXPORT_SYMBOL_GPL(iwl_set_bit); - -void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) -{ - unsigned long flags; - - spin_lock_irqsave(&trans->reg_lock, flags); - __iwl_clear_bit(trans, reg, mask); - spin_unlock_irqrestore(&trans->reg_lock, flags); -} -EXPORT_SYMBOL_GPL(iwl_clear_bit); - -void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) -{ - unsigned long flags; - u32 v; - -#ifdef CONFIG_IWLWIFI_DEBUG - WARN_ON_ONCE(value & ~mask); -#endif - - spin_lock_irqsave(&trans->reg_lock, flags); - v = iwl_read32(trans, reg); - v &= ~mask; - v |= value; - iwl_write32(trans, reg, v); - spin_unlock_irqrestore(&trans->reg_lock, flags); -} -EXPORT_SYMBOL_GPL(iwl_set_bits_mask); - int iwl_poll_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { @@ -99,87 +51,14 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr, } EXPORT_SYMBOL_GPL(iwl_poll_bit); -int iwl_grab_nic_access_silent(struct iwl_trans *trans) -{ - int ret; - - lockdep_assert_held(&trans->reg_lock); - - /* this bit wakes up the NIC */ - __iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - /* - * These bits say the device is running, and should keep running for - * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), - * but they do not indicate that embedded SRAM is restored yet; - * 3945 and 4965 have volatile SRAM, and must save/restore contents - * to/from host DRAM when sleeping/waking for power-saving. - * Each direction takes approximately 1/4 millisecond; with this - * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a - * series of register accesses are expected (e.g. reading Event Log), - * to keep device from sleeping. - * - * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that - * SRAM is okay/restored. We don't check that here because this call - * is just for hardware register access; but GP1 MAC_SLEEP check is a - * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). - * - * 5000 series and later (including 1000 series) have non-volatile SRAM, - * and do not save/restore SRAM when power cycling. 
- */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); - if (ret < 0) { - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); - return -EIO; - } - - return 0; -} -EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent); - -bool iwl_grab_nic_access(struct iwl_trans *trans) -{ - int ret = iwl_grab_nic_access_silent(trans); - if (unlikely(ret)) { - u32 val = iwl_read32(trans, CSR_GP_CNTRL); - WARN_ONCE(1, "Timeout waiting for hardware access " - "(CSR_GP_CNTRL 0x%08x)\n", val); - return false; - } - - return true; -} -EXPORT_SYMBOL_GPL(iwl_grab_nic_access); - -void iwl_release_nic_access(struct iwl_trans *trans) -{ - lockdep_assert_held(&trans->reg_lock); - __iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - /* - * Above we read the CSR_GP_CNTRL register, which will flush - * any previous writes, but we need the write that clears the - * MAC_ACCESS_REQ bit to be performed before any other writes - * scheduled on different CPUs (after we drop reg_lock). - */ - mmiowb(); -} -EXPORT_SYMBOL_GPL(iwl_release_nic_access); - u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) { - u32 value; + u32 value = 0x5a5a5a5a; unsigned long flags; - - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); - value = iwl_read32(trans, reg); - iwl_release_nic_access(trans); - spin_unlock_irqrestore(&trans->reg_lock, flags); + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + value = iwl_read32(trans, reg); + iwl_trans_release_nic_access(trans, &flags); + } return value; } @@ -189,12 +68,10 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) { unsigned long flags; - spin_lock_irqsave(&trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans))) { + if (iwl_trans_grab_nic_access(trans, false, &flags)) { iwl_write32(trans, reg, value); - iwl_release_nic_access(trans); + iwl_trans_release_nic_access(trans, &flags); } - spin_unlock_irqrestore(&trans->reg_lock, flags); } EXPORT_SYMBOL_GPL(iwl_write_direct32); @@ -230,13 +107,12 @@ static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) { unsigned long flags; - u32 val; + u32 val = 0x5a5a5a5a; - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); - val = __iwl_read_prph(trans, ofs); - iwl_release_nic_access(trans); - spin_unlock_irqrestore(&trans->reg_lock, flags); + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + val = __iwl_read_prph(trans, ofs); + iwl_trans_release_nic_access(trans, &flags); + } return val; } EXPORT_SYMBOL_GPL(iwl_read_prph); @@ -245,12 +121,10 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) { unsigned long flags; - spin_lock_irqsave(&trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans))) { + if (iwl_trans_grab_nic_access(trans, false, &flags)) { __iwl_write_prph(trans, ofs, val); - iwl_release_nic_access(trans); + iwl_trans_release_nic_access(trans, &flags); } - spin_unlock_irqrestore(&trans->reg_lock, flags); } EXPORT_SYMBOL_GPL(iwl_write_prph); @@ -258,13 +132,11 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) { unsigned long flags; - spin_lock_irqsave(&trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans))) { + if (iwl_trans_grab_nic_access(trans, false, &flags)) { __iwl_write_prph(trans, ofs, __iwl_read_prph(trans, ofs) | mask); - iwl_release_nic_access(trans); + 
iwl_trans_release_nic_access(trans, &flags); } - spin_unlock_irqrestore(&trans->reg_lock, flags); } EXPORT_SYMBOL_GPL(iwl_set_bits_prph); @@ -273,13 +145,11 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, { unsigned long flags; - spin_lock_irqsave(&trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans))) { + if (iwl_trans_grab_nic_access(trans, false, &flags)) { __iwl_write_prph(trans, ofs, (__iwl_read_prph(trans, ofs) & mask) | bits); - iwl_release_nic_access(trans); + iwl_trans_release_nic_access(trans, &flags); } - spin_unlock_irqrestore(&trans->reg_lock, flags); } EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph); @@ -288,67 +158,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) unsigned long flags; u32 val; - spin_lock_irqsave(&trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans))) { + if (iwl_trans_grab_nic_access(trans, false, &flags)) { val = __iwl_read_prph(trans, ofs); __iwl_write_prph(trans, ofs, (val & ~mask)); - iwl_release_nic_access(trans); + iwl_trans_release_nic_access(trans, &flags); } - spin_unlock_irqrestore(&trans->reg_lock, flags); } EXPORT_SYMBOL_GPL(iwl_clear_bits_prph); - -void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) -{ - unsigned long flags; - int offs; - u32 *vals = buf; - - spin_lock_irqsave(&trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans))) { - iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); - for (offs = 0; offs < dwords; offs++) - vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - iwl_release_nic_access(trans); - } - spin_unlock_irqrestore(&trans->reg_lock, flags); -} -EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords); - -u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr) -{ - u32 value; - - _iwl_read_targ_mem_dwords(trans, addr, &value, 1); - - return value; -} -EXPORT_SYMBOL_GPL(iwl_read_targ_mem); - -int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords) -{ - unsigned long flags; - int offs, result = 0; - const u32 *vals = buf; - - spin_lock_irqsave(&trans->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans))) { - iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); - for (offs = 0; offs < dwords; offs++) - iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]); - iwl_release_nic_access(trans); - } else - result = -EBUSY; - spin_unlock_irqrestore(&trans->reg_lock, flags); - - return result; -} -EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords); - -int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val) -{ - return _iwl_write_targ_mem_dwords(trans, addr, &val, 1); -} -EXPORT_SYMBOL_GPL(iwl_write_targ_mem); diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h index 48dc753e3742..fd9f5b97fff3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project. 
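The iwl-io.c hunks above drop the driver-side reg_lock / iwl_grab_nic_access() sequence in favour of the transport's iwl_trans_grab_nic_access() / iwl_trans_release_nic_access() pair, and the direct-read helpers now return the 0x5a5a5a5a poison value when the NIC cannot be woken. A standalone sketch of that calling pattern; the fake_* types and functions are stand-ins, and only the call shape and the poison value come from the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_trans {
	uint32_t regs[16];
	bool awake;
};

/* stand-in for iwl_trans_grab_nic_access(): wake the NIC, save IRQ flags */
static bool fake_grab_nic_access(struct fake_trans *trans, unsigned long *flags)
{
	*flags = 0;		/* the real op saves IRQ flags here */
	trans->awake = true;	/* the real op sets MAC_ACCESS_REQ and polls */
	return true;		/* false would mean the NIC could not be woken */
}

/* stand-in for iwl_trans_release_nic_access() */
static void fake_release_nic_access(struct fake_trans *trans, unsigned long *flags)
{
	(void)flags;
	trans->awake = false;
}

/* the shape of the reworked iwl_read_direct32() */
static uint32_t fake_read_direct32(struct fake_trans *trans, unsigned int reg)
{
	uint32_t value = 0x5a5a5a5a;	/* poison returned if access is refused */
	unsigned long flags;

	if (fake_grab_nic_access(trans, &flags)) {
		value = trans->regs[reg];
		fake_release_nic_access(trans, &flags);
	}
	return value;
}

int main(void)
{
	struct fake_trans trans = { .regs = { [3] = 0x1234 } };

	printf("reg 3 = 0x%08x\n", fake_read_direct32(&trans, 3));
	return 0;
}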
* @@ -51,20 +51,21 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs) return val; } -void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask); -void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask); +static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) +{ + iwl_trans_set_bits_mask(trans, reg, mask, mask); +} -void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value); +static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) +{ + iwl_trans_set_bits_mask(trans, reg, mask, 0); +} int iwl_poll_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout); int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, int timeout); -int iwl_grab_nic_access_silent(struct iwl_trans *trans); -bool iwl_grab_nic_access(struct iwl_trans *trans); -void iwl_release_nic_access(struct iwl_trans *trans); - u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg); void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); @@ -76,19 +77,4 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, u32 bits, u32 mask); void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); -void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); - -#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \ - do { \ - BUILD_BUG_ON((bufsize) % sizeof(u32)); \ - _iwl_read_targ_mem_dwords(trans, addr, buf, \ - (bufsize) / sizeof(u32));\ - } while (0) - -int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords); - -u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr); -int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val); #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index d9a86d6b2bd7..e5e3a79eae2f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c index c61f2070f15a..c3affbc62cdf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h index 821523100cf1..c2ce764463a3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c new file mode 100644 index 000000000000..a70213bdb83c --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -0,0 +1,346 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/export.h> +#include "iwl-modparams.h" +#include "iwl-nvm-parse.h" + +/* NVM offsets (in words) definitions */ +enum wkp_nvm_offsets { + /* NVM HW-Section offset (in words) definitions */ + HW_ADDR = 0x15, + +/* NVM SW-Section offset (in words) definitions */ + NVM_SW_SECTION = 0x1C0, + NVM_VERSION = 0, + RADIO_CFG = 1, + SKU = 2, + N_HW_ADDRS = 3, + NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, + +/* NVM calibration section offset (in words) definitions */ + NVM_CALIB_SECTION = 0x2B8, + XTAL_CALIB = 0x316 - NVM_CALIB_SECTION +}; + +/* SKU Capabilities (actual values from NVM definition) */ +enum nvm_sku_bits { + NVM_SKU_CAP_BAND_24GHZ = BIT(0), + NVM_SKU_CAP_BAND_52GHZ = BIT(1), + NVM_SKU_CAP_11N_ENABLE = BIT(2), +}; + +/* radio config bits (actual values from NVM definition) */ +#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ +#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + +/* + * These are the channel numbers in the order that they are stored in the NVM + */ +static const u8 iwl_nvm_channels[] = { + /* 2.4 GHz */ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + /* 5 GHz */ + 36, 40, 44 , 48, 52, 56, 60, 64, + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161, 165 +}; + +#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) +#define NUM_2GHZ_CHANNELS 14 +#define FIRST_2GHZ_HT_MINUS 5 +#define LAST_2GHZ_HT_PLUS 9 +#define LAST_5GHZ_HT 161 + + +/* rate data (static) */ +static struct ieee80211_rate iwl_cfg80211_rates[] = { + { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, + { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, + { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, + { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, + { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, + { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, + { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, + { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, + { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, +}; +#define RATES_24_OFFS 0 +#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) +#define RATES_52_OFFS 4 +#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) + +/** + * enum iwl_nvm_channel_flags - channel flags in NVM + * 
@NVM_CHANNEL_VALID: channel is usable for this SKU/geo + * @NVM_CHANNEL_IBSS: usable as an IBSS channel + * @NVM_CHANNEL_ACTIVE: active scanning allowed + * @NVM_CHANNEL_RADAR: radar detection required + * @NVM_CHANNEL_DFS: dynamic freq selection candidate + * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?) + * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) + */ +enum iwl_nvm_channel_flags { + NVM_CHANNEL_VALID = BIT(0), + NVM_CHANNEL_IBSS = BIT(1), + NVM_CHANNEL_ACTIVE = BIT(3), + NVM_CHANNEL_RADAR = BIT(4), + NVM_CHANNEL_DFS = BIT(7), + NVM_CHANNEL_WIDE = BIT(8), + NVM_CHANNEL_40MHZ = BIT(9), +}; + +#define CHECK_AND_PRINT_I(x) \ + ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "") + +static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const __le16 * const nvm_ch_flags) +{ + int ch_idx; + int n_channels = 0; + struct ieee80211_channel *channel; + u16 ch_flags; + bool is_5ghz; + + for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) { + ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); + if (!(ch_flags & NVM_CHANNEL_VALID)) { + IWL_DEBUG_EEPROM(dev, + "Ch. %d Flags %x [%sGHz] - No traffic\n", + iwl_nvm_channels[ch_idx], + ch_flags, + (ch_idx >= NUM_2GHZ_CHANNELS) ? + "5.2" : "2.4"); + continue; + } + + channel = &data->channels[n_channels]; + n_channels++; + + channel->hw_value = iwl_nvm_channels[ch_idx]; + channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + channel->center_freq = + ieee80211_channel_to_frequency( + channel->hw_value, channel->band); + + /* TODO: Need to be dependent to the NVM */ + channel->flags = IEEE80211_CHAN_NO_HT40; + if (ch_idx < NUM_2GHZ_CHANNELS && + (ch_flags & NVM_CHANNEL_40MHZ)) { + if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS) + channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; + if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS) + channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; + } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT && + (ch_flags & NVM_CHANNEL_40MHZ)) { + if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) + channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; + else + channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; + } + + if (!(ch_flags & NVM_CHANNEL_IBSS)) + channel->flags |= IEEE80211_CHAN_NO_IBSS; + + if (!(ch_flags & NVM_CHANNEL_ACTIVE)) + channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN; + + if (ch_flags & NVM_CHANNEL_RADAR) + channel->flags |= IEEE80211_CHAN_RADAR; + + /* Initialize regulatory-based run-time data */ + + /* TODO: read the real value from the NVM */ + channel->max_power = 0; + is_5ghz = channel->band == IEEE80211_BAND_5GHZ; + IWL_DEBUG_EEPROM(dev, + "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", + channel->hw_value, + is_5ghz ? "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(DFS), + ch_flags, + channel->max_power, + ((ch_flags & NVM_CHANNEL_IBSS) && + !(ch_flags & NVM_CHANNEL_RADAR)) + ? 
"" : "not "); + } + + return n_channels; +} + +static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_sw) +{ + int n_channels = iwl_init_channel_map(dev, cfg, data, + &nvm_sw[NVM_CHANNELS]); + int n_used = 0; + struct ieee80211_supported_band *sband; + + sband = &data->bands[IEEE80211_BAND_2GHZ]; + sband->band = IEEE80211_BAND_2GHZ; + sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; + sband->n_bitrates = N_RATES_24; + n_used += iwl_init_sband_channels(data, sband, n_channels, + IEEE80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ); + + sband = &data->bands[IEEE80211_BAND_5GHZ]; + sband->band = IEEE80211_BAND_5GHZ; + sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; + sband->n_bitrates = N_RATES_52; + n_used += iwl_init_sband_channels(data, sband, n_channels, + IEEE80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); + + if (n_channels != n_used) + IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", + n_used, n_channels); +} + +struct iwl_nvm_data * +iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, + const __le16 *nvm_hw, const __le16 *nvm_sw, + const __le16 *nvm_calib) +{ + struct iwl_nvm_data *data; + u8 hw_addr[ETH_ALEN]; + u16 radio_cfg, sku; + + data = kzalloc(sizeof(*data) + + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, + GFP_KERNEL); + if (!data) + return NULL; + + data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION); + + radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG); + data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); + data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); + data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); + data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg); + data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg); + data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg); + + sku = le16_to_cpup(nvm_sw + SKU); + data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; + data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; + data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) + data->sku_cap_11n_enable = false; + + /* check overrides (some devices have wrong NVM) */ + if (cfg->valid_tx_ant) + data->valid_tx_ant = cfg->valid_tx_ant; + if (cfg->valid_rx_ant) + data->valid_rx_ant = cfg->valid_rx_ant; + + if (!data->valid_tx_ant || !data->valid_rx_ant) { + IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n", + data->valid_tx_ant, data->valid_rx_ant); + kfree(data); + return NULL; + } + + data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS); + + data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); + data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); + + /* The byte order is little endian 16 bit, meaning 214365 */ + memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN); + data->hw_addr[0] = hw_addr[1]; + data->hw_addr[1] = hw_addr[0]; + data->hw_addr[2] = hw_addr[3]; + data->hw_addr[3] = hw_addr[2]; + data->hw_addr[4] = hw_addr[5]; + data->hw_addr[5] = hw_addr[4]; + + iwl_init_sbands(dev, cfg, data, nvm_sw); + + data->calib_version = 255; /* TODO: + this value will prevent some checks from + failing, we need to check if this + field is still needed, and if it does, + where is it in the NVM*/ + + return data; +} +EXPORT_SYMBOL_GPL(iwl_parse_nvm_data); diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h new file mode 100644 index 000000000000..b2692bd287fa --- 
/dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h @@ -0,0 +1,80 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#ifndef __iwl_nvm_parse_h__ +#define __iwl_nvm_parse_h__ + +#include "iwl-eeprom-parse.h" + +/** + * iwl_parse_nvm_data - parse NVM data and return values + * + * This function parses all NVM values we need and then + * returns a (newly allocated) struct containing all the + * relevant values for driver use. The struct must be freed + * later with iwl_free_nvm_data(). 
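iwl_parse_nvm_data() above copies the MAC address out of NVM storage that is organised as 16-bit little-endian words (the "meaning 214365" comment), so adjacent bytes are swapped pairwise. A small standalone illustration with a made-up address:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* pairwise byte swap, same effect as the hw_addr assignments in the patch */
static void nvm_to_mac(uint8_t mac[ETH_ALEN], const uint8_t nvm[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		mac[i]     = nvm[i + 1];
		mac[i + 1] = nvm[i];
	}
}

int main(void)
{
	/* as stored in NVM: each 16-bit word is little endian (made-up address) */
	const uint8_t nvm[ETH_ALEN] = { 0x34, 0x12, 0x78, 0x56, 0xbc, 0x9a };
	uint8_t mac[ETH_ALEN];

	nvm_to_mac(mac, nvm);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 12:34:56:78:9a:bc */
	return 0;
}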
+ */ +struct iwl_nvm_data * +iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, + const __le16 *nvm_hw, const __le16 *nvm_sw, + const __le16 *nvm_calib); + +#endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index c8d9b9517468..4a680019e117 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -63,6 +63,8 @@ #ifndef __iwl_op_mode_h__ #define __iwl_op_mode_h__ +#include <linux/debugfs.h> + struct iwl_op_mode; struct iwl_trans; struct sk_buff; @@ -111,13 +113,13 @@ struct iwl_cfg; * May sleep * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the * HCMD the this Rx responds to. - * Must be atomic and called with BH disabled. + * This callback may sleep, it is called from a threaded IRQ handler. * @queue_full: notifies that a HW queue is full. * Must be atomic and called with BH disabled. * @queue_not_full: notifies that a HW queue is not full any more. * Must be atomic and called with BH disabled. * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that - * the radio is killed. Must be atomic. + * the radio is killed. May sleep. * @free_skb: allows the transport layer to free skbs that haven't been * reclaimed by the op_mode. This can happen when the driver is freed and * there are Tx packets pending in the transport layer. @@ -128,8 +130,7 @@ struct iwl_cfg; * called with BH disabled. * @nic_config: configure NIC, called before firmware is started. * May sleep - * @wimax_active: invoked when WiMax becomes active. Must be atomic and called - * with BH disabled. + * @wimax_active: invoked when WiMax becomes active. May sleep */ struct iwl_op_mode_ops { struct iwl_op_mode *(*start)(struct iwl_trans *trans, @@ -176,6 +177,7 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { + might_sleep(); return op_mode->ops->rx(op_mode, rxb, cmd); } @@ -194,6 +196,7 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state) { + might_sleep(); op_mode->ops->hw_rf_kill(op_mode, state); } @@ -221,6 +224,7 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode) static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode) { + might_sleep(); op_mode->ops->wimax_active(op_mode); } diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c new file mode 100644 index 000000000000..14fc8d39fc28 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c @@ -0,0 +1,514 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/export.h> + +#include "iwl-phy-db.h" +#include "iwl-debug.h" +#include "iwl-op-mode.h" +#include "iwl-trans.h" + +#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ +#define IWL_NUM_PAPD_CH_GROUPS 4 +#define IWL_NUM_TXP_CH_GROUPS 9 + +struct iwl_phy_db_entry { + u16 size; + u8 *data; +}; + +/** + * struct iwl_phy_db - stores phy configuration and calibration data. + * + * @cfg: phy configuration. + * @calib_nch: non channel specific calibration data. + * @calib_ch: channel specific calibration data. + * @calib_ch_group_papd: calibration data related to papd channel group. + * @calib_ch_group_txp: calibration data related to tx power chanel group. 
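The calib_ch_group_papd[] and calib_ch_group_txp[] arrays documented above are indexed by channel group; the helpers added further down in this file (ch_id_to_ch_index(), channel_id_to_papd()) map a channel id onto those indices. A standalone sketch that reproduces the same arithmetic for a few sample channels (no validity checking, unlike the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as ch_id_to_ch_index() below */
static unsigned int ch_id_to_index(unsigned int ch_id)
{
	if (ch_id <= 14)
		return ch_id - 1;
	if (ch_id <= 64)
		return (ch_id + 20) / 4;
	if (ch_id <= 140)
		return (ch_id - 12) / 4;
	return (ch_id - 13) / 4;
}

/* same grouping as channel_id_to_papd() below */
static unsigned int ch_id_to_papd_group(unsigned int ch_id)
{
	if (ch_id <= 14)
		return 0;
	if (ch_id <= 64)
		return 1;
	if (ch_id <= 140)
		return 2;
	return 3;
}

int main(void)
{
	const unsigned int samples[] = { 1, 11, 36, 64, 100, 140, 149, 165 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("channel %3u -> index %2u, PAPD group %u\n",
		       samples[i], ch_id_to_index(samples[i]),
		       ch_id_to_papd_group(samples[i]));
	return 0;
}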
+ */ +struct iwl_phy_db { + struct iwl_phy_db_entry cfg; + struct iwl_phy_db_entry calib_nch; + struct iwl_phy_db_entry calib_ch; + struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; + struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; + + u32 channel_num; + u32 channel_size; + + struct iwl_trans *trans; +}; + +enum iwl_phy_db_section_type { + IWL_PHY_DB_CFG = 1, + IWL_PHY_DB_CALIB_NCH, + IWL_PHY_DB_CALIB_CH, + IWL_PHY_DB_CALIB_CHG_PAPD, + IWL_PHY_DB_CALIB_CHG_TXP, + IWL_PHY_DB_MAX +}; + +#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */ + +/* + * phy db - configure operational ucode + */ +struct iwl_phy_db_cmd { + __le16 type; + __le16 length; + u8 data[]; +} __packed; + +/* for parsing of tx power channel group data that comes from the firmware*/ +struct iwl_phy_db_chg_txp { + __le32 space; + __le16 max_channel_idx; +} __packed; + +/* + * phy db - Receieve phy db chunk after calibrations + */ +struct iwl_calib_res_notif_phy_db { + __le16 type; + __le16 length; + u8 data[]; +} __packed; + +#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587) +static inline void iwl_phy_db_test_pic(__le32 pic) +{ + WARN_ON(IWL_PHY_DB_STATIC_PIC != pic); +} + +struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans) +{ + struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db), + GFP_KERNEL); + + if (!phy_db) + return phy_db; + + phy_db->trans = trans; + + /* TODO: add default values of the phy db. */ + return phy_db; +} +EXPORT_SYMBOL(iwl_phy_db_init); + +/* + * get phy db section: returns a pointer to a phy db section specified by + * type and channel group id. + */ +static struct iwl_phy_db_entry * +iwl_phy_db_get_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u16 chg_id) +{ + if (!phy_db || type >= IWL_PHY_DB_MAX) + return NULL; + + switch (type) { + case IWL_PHY_DB_CFG: + return &phy_db->cfg; + case IWL_PHY_DB_CALIB_NCH: + return &phy_db->calib_nch; + case IWL_PHY_DB_CALIB_CH: + return &phy_db->calib_ch; + case IWL_PHY_DB_CALIB_CHG_PAPD: + if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) + return NULL; + return &phy_db->calib_ch_group_papd[chg_id]; + case IWL_PHY_DB_CALIB_CHG_TXP: + if (chg_id >= IWL_NUM_TXP_CH_GROUPS) + return NULL; + return &phy_db->calib_ch_group_txp[chg_id]; + default: + return NULL; + } + return NULL; +} + +static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u16 chg_id) +{ + struct iwl_phy_db_entry *entry = + iwl_phy_db_get_section(phy_db, type, chg_id); + if (!entry) + return; + + kfree(entry->data); + entry->data = NULL; + entry->size = 0; +} + +void iwl_phy_db_free(struct iwl_phy_db *phy_db) +{ + int i; + + if (!phy_db) + return; + + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0); + for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); + for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i); + + kfree(phy_db); +} +EXPORT_SYMBOL(iwl_phy_db_free); + +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, + gfp_t alloc_ctx) +{ + struct iwl_calib_res_notif_phy_db *phy_db_notif = + (struct iwl_calib_res_notif_phy_db *)pkt->data; + enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type); + u16 size = le16_to_cpu(phy_db_notif->length); + struct iwl_phy_db_entry *entry; + u16 chg_id = 0; + + if (!phy_db) 
+ return -EINVAL; + + if (type == IWL_PHY_DB_CALIB_CHG_PAPD || + type == IWL_PHY_DB_CALIB_CHG_TXP) + chg_id = le16_to_cpup((__le16 *)phy_db_notif->data); + + entry = iwl_phy_db_get_section(phy_db, type, chg_id); + if (!entry) + return -EINVAL; + + kfree(entry->data); + entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx); + if (!entry->data) { + entry->size = 0; + return -ENOMEM; + } + + entry->size = size; + + if (type == IWL_PHY_DB_CALIB_CH) { + phy_db->channel_num = + le32_to_cpup((__le32 *)phy_db_notif->data); + phy_db->channel_size = + (size - CHANNEL_NUM_SIZE) / phy_db->channel_num; + } + + /* Test PIC */ + if (type != IWL_PHY_DB_CFG) + iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) + + (size / sizeof(__le32)) - 1)); + + IWL_DEBUG_INFO(phy_db->trans, + "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", + __func__, __LINE__, type, size); + + return 0; +} +EXPORT_SYMBOL(iwl_phy_db_set_section); + +static int is_valid_channel(u16 ch_id) +{ + if (ch_id <= 14 || + (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) || + (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) || + (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1)) + return 1; + return 0; +} + +static u8 ch_id_to_ch_index(u16 ch_id) +{ + if (WARN_ON(!is_valid_channel(ch_id))) + return 0xff; + + if (ch_id <= 14) + return ch_id - 1; + if (ch_id <= 64) + return (ch_id + 20) / 4; + if (ch_id <= 140) + return (ch_id - 12) / 4; + return (ch_id - 13) / 4; +} + + +static u16 channel_id_to_papd(u16 ch_id) +{ + if (WARN_ON(!is_valid_channel(ch_id))) + return 0xff; + + if (1 <= ch_id && ch_id <= 14) + return 0; + if (36 <= ch_id && ch_id <= 64) + return 1; + if (100 <= ch_id && ch_id <= 140) + return 2; + return 3; +} + +static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id) +{ + struct iwl_phy_db_chg_txp *txp_chg; + int i; + u8 ch_index = ch_id_to_ch_index(ch_id); + if (ch_index == 0xff) + return 0xff; + + for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) { + txp_chg = (void *)phy_db->calib_ch_group_txp[i].data; + if (!txp_chg) + return 0xff; + /* + * Looking for the first channel group whose max channel is + * higher than the wanted channel. 
+ */ + if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index) + return i; + } + return 0xff; +} +static +int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, + u32 type, u8 **data, u16 *size, u16 ch_id) +{ + struct iwl_phy_db_entry *entry; + u32 channel_num; + u32 channel_size; + u16 ch_group_id = 0; + u16 index; + + if (!phy_db) + return -EINVAL; + + /* find wanted channel group */ + if (type == IWL_PHY_DB_CALIB_CHG_PAPD) + ch_group_id = channel_id_to_papd(ch_id); + else if (type == IWL_PHY_DB_CALIB_CHG_TXP) + ch_group_id = channel_id_to_txp(phy_db, ch_id); + + entry = iwl_phy_db_get_section(phy_db, type, ch_group_id); + if (!entry) + return -EINVAL; + + if (type == IWL_PHY_DB_CALIB_CH) { + index = ch_id_to_ch_index(ch_id); + channel_num = phy_db->channel_num; + channel_size = phy_db->channel_size; + if (index >= channel_num) { + IWL_ERR(phy_db->trans, "Wrong channel number %d\n", + ch_id); + return -EINVAL; + } + *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size; + *size = channel_size; + } else { + *data = entry->data; + *size = entry->size; + } + + /* Test PIC */ + if (type != IWL_PHY_DB_CFG) + iwl_phy_db_test_pic(*(((__le32 *)*data) + + (*size / sizeof(__le32)) - 1)); + + IWL_DEBUG_INFO(phy_db->trans, + "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", + __func__, __LINE__, type, *size); + + return 0; +} + +static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type, + u16 length, void *data) +{ + struct iwl_phy_db_cmd phy_db_cmd; + struct iwl_host_cmd cmd = { + .id = PHY_DB_CMD, + .flags = CMD_SYNC, + }; + + IWL_DEBUG_INFO(phy_db->trans, + "Sending PHY-DB hcmd of type %d, of length %d\n", + type, length); + + /* Set phy db cmd variables */ + phy_db_cmd.type = cpu_to_le16(type); + phy_db_cmd.length = cpu_to_le16(length); + + /* Set hcmd variables */ + cmd.data[0] = &phy_db_cmd; + cmd.len[0] = sizeof(struct iwl_phy_db_cmd); + cmd.data[1] = data; + cmd.len[1] = length; + cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; + + return iwl_trans_send_cmd(phy_db->trans, &cmd); +} + +static int iwl_phy_db_send_all_channel_groups( + struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u8 max_ch_groups) +{ + u16 i; + int err; + struct iwl_phy_db_entry *entry; + + /* Send all the channel specific groups to operational fw */ + for (i = 0; i < max_ch_groups; i++) { + entry = iwl_phy_db_get_section(phy_db, + type, + i); + if (!entry) + return -EINVAL; + + /* Send the requested PHY DB section */ + err = iwl_send_phy_db_cmd(phy_db, + type, + entry->size, + entry->data); + if (err) { + IWL_ERR(phy_db->trans, + "Can't SEND phy_db section %d (%d), err %d", + type, i, err); + return err; + } + + IWL_DEBUG_INFO(phy_db->trans, + "Sent PHY_DB HCMD, type = %d num = %d", + type, i); + } + + return 0; +} + +int iwl_send_phy_db_data(struct iwl_phy_db *phy_db) +{ + u8 *data = NULL; + u16 size = 0; + int err; + + IWL_DEBUG_INFO(phy_db->trans, + "Sending phy db data and configuration to runtime image\n"); + + /* Send PHY DB CFG section */ + err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG, + &data, &size, 0); + if (err) { + IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n"); + return err; + } + + err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send HCMD of Phy DB cfg section\n"); + return err; + } + + err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH, + &data, &size, 0); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot get Phy DB non specific channel section\n"); + return err; + } + + err = 
iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send HCMD of Phy DB non specific channel section\n"); + return err; + } + + /* Send all the TXP channel specific data */ + err = iwl_phy_db_send_all_channel_groups(phy_db, + IWL_PHY_DB_CALIB_CHG_PAPD, + IWL_NUM_PAPD_CH_GROUPS); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send channel specific PAPD groups"); + return err; + } + + /* Send all the TXP channel specific data */ + err = iwl_phy_db_send_all_channel_groups(phy_db, + IWL_PHY_DB_CALIB_CHG_TXP, + IWL_NUM_TXP_CH_GROUPS); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send channel specific TX power groups"); + return err; + } + + IWL_DEBUG_INFO(phy_db->trans, + "Finished sending phy db non channel data\n"); + return 0; +} +EXPORT_SYMBOL(iwl_send_phy_db_data); diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h new file mode 100644 index 000000000000..d0e43d96ab38 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h @@ -0,0 +1,82 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __IWL_PHYDB_H__ +#define __IWL_PHYDB_H__ + +#include <linux/types.h> + +#include "iwl-op-mode.h" +#include "iwl-trans.h" + +struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans); + +void iwl_phy_db_free(struct iwl_phy_db *phy_db); + +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, + gfp_t alloc_ctx); + + +int iwl_send_phy_db_data(struct iwl_phy_db *phy_db); + +#endif /* __IWL_PHYDB_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index c3a4bb41e533..f76e9cad7757 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -97,6 +97,9 @@ #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) +/* Device system time */ +#define DEVICE_SYSTEM_TIME_REG 0xA0206C + /** * Tx Scheduler * diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c index 81e8c7126d72..ce0c67b425ee 100644 --- a/drivers/net/wireless/iwlwifi/iwl-test.c +++ b/drivers/net/wireless/iwlwifi/iwl-test.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -466,19 +466,18 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size) /* Hard-coded periphery absolute address */ if (IWL_ABS_PRPH_START <= addr && addr < IWL_ABS_PRPH_START + PRPH_END) { - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); + if (!iwl_trans_grab_nic_access(trans, false, &flags)) { + return -EIO; + } iwl_write32(trans, HBUS_TARG_PRPH_RADDR, addr | (3 << 24)); for (i = 0; i < size; i += 4) *(u32 *)(tst->mem.addr + i) = iwl_read32(trans, HBUS_TARG_PRPH_RDAT); - iwl_release_nic_access(trans); - spin_unlock_irqrestore(&trans->reg_lock, flags); + iwl_trans_release_nic_access(trans, &flags); } else { /* target memory (SRAM) */ - _iwl_read_targ_mem_dwords(trans, addr, - tst->mem.addr, - tst->mem.size / 4); + iwl_trans_read_mem(trans, addr, tst->mem.addr, + tst->mem.size / 4); } tst->mem.nchunks = @@ -501,28 +500,25 @@ static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr, if (IWL_ABS_PRPH_START <= addr && addr < IWL_ABS_PRPH_START + PRPH_END) { - /* Periphery writes can be 1-3 bytes long, or DWORDs */ - if (size < 4) { - memcpy(&val, buf, size); - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); - iwl_write32(trans, HBUS_TARG_PRPH_WADDR, - (addr & 0x0000FFFF) | - ((size - 1) << 24)); - iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); - iwl_release_nic_access(trans); - /* needed after consecutive writes w/o read */ - mmiowb(); - spin_unlock_irqrestore(&trans->reg_lock, flags); - } else { - if (size % 4) - return -EINVAL; - for (i = 0; i < size; i += 4) - iwl_write_prph(trans, addr+i, - *(u32 *)(buf+i)); - } + /* Periphery writes can be 1-3 bytes long, or DWORDs */ + if (size < 4) { + memcpy(&val, buf, size); + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + return -EIO; + iwl_write32(trans, HBUS_TARG_PRPH_WADDR, + (addr & 0x0000FFFF) | + ((size - 1) << 24)); + iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); + iwl_trans_release_nic_access(trans, &flags); + } else { + if (size % 4) + return -EINVAL; + for (i = 0; i < size; i += 4) + iwl_write_prph(trans, addr+i, + *(u32 *)(buf+i)); + } } else if (iwl_test_valid_hw_addr(tst, addr)) { - _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4); + iwl_trans_write_mem(trans, addr, buf, size / 4); } else { return -EINVAL; } diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h index e13ffa8acc02..7fbf4d717caa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-test.h +++ b/drivers/net/wireless/iwlwifi/iwl-test.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h index 6ba211b09426..a963f45c6849 100644 --- a/drivers/net/wireless/iwlwifi/iwl-testmode.h +++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index b76532e238c1..8c7bec6b9a0b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -65,6 +65,7 @@ #include <linux/ieee80211.h> #include <linux/mm.h> /* for page_address */ +#include <linux/lockdep.h> #include "iwl-debug.h" #include "iwl-config.h" @@ -193,11 +194,11 @@ struct iwl_rx_packet { * @CMD_ON_DEMAND: This command is sent by the test mode pipe. */ enum CMD_MODE { - CMD_SYNC = 0, - CMD_ASYNC = BIT(0), - CMD_WANT_SKB = BIT(1), - CMD_WANT_HCMD = BIT(2), - CMD_ON_DEMAND = BIT(3), + CMD_SYNC = 0, + CMD_ASYNC = BIT(0), + CMD_WANT_SKB = BIT(1), + CMD_WANT_HCMD = BIT(2), + CMD_ON_DEMAND = BIT(3), }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -274,6 +275,7 @@ struct iwl_rx_cmd_buffer { struct page *_page; int _offset; bool _page_stolen; + u32 _rx_page_order; unsigned int truesize; }; @@ -294,6 +296,11 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) return r->_page; } +static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) +{ + __free_pages(r->_page, r->_rx_page_order); +} + #define MAX_NO_RECLAIM_CMDS 6 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) @@ -308,6 +315,16 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) #define IWL_FRAME_LIMIT 64 /** + * enum iwl_d3_status - WoWLAN image/device status + * @IWL_D3_STATUS_ALIVE: firmware is still running after resume + * @IWL_D3_STATUS_RESET: device was reset while suspended + */ +enum iwl_d3_status { + IWL_D3_STATUS_ALIVE, + IWL_D3_STATUS_RESET, +}; + +/** + * struct iwl_trans_config - transport configuration * * @op_mode: pointer to the upper layer. 
@@ -321,6 +338,8 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) * @n_no_reclaim_cmds: # of commands in list * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs, * if unset 4k will be the RX buffer size + * @bc_table_dword: set to true if the BC table expects the byte count to be + * in DWORD (as opposed to bytes) * @queue_watchdog_timeout: time (in ms) after which queues * are considered stuck and will trigger device restart * @command_names: array of command names, must be 256 entries @@ -335,6 +354,7 @@ struct iwl_trans_config { int n_no_reclaim_cmds; bool rx_buf_size_8k; + bool bc_table_dword; unsigned int queue_watchdog_timeout; const char **command_names; }; @@ -360,9 +380,12 @@ struct iwl_trans; * May sleep * @stop_device:stops the whole device (embedded CPU put to reset) * May sleep - * @wowlan_suspend: put the device into the correct mode for WoWLAN during + * @d3_suspend: put the device into the correct mode for WoWLAN during * suspend. This is optional, if not implemented WoWLAN will not be * supported. This callback may sleep. + * @d3_resume: resume the device after WoWLAN, enabling the opmode to + * talk to the WoWLAN image to get its status. This is optional, if not + * implemented WoWLAN will not be supported. This callback may sleep. * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. * If RFkill is asserted in the middle of a SYNC host command, it must * return -ERFKILL straight away. @@ -387,20 +410,31 @@ struct iwl_trans; * @read32: read a u32 register at offset ofs from the BAR * @read_prph: read a DWORD from a periphery register * @write_prph: write a DWORD to a periphery register + * @read_mem: read device's SRAM in DWORD + * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory + * will be zeroed. * @configure: configure parameters required by the transport layer from * the op_mode. May be called several times before start_fw, can't be * called after that. * @set_pmi: set the power pmi state + * @grab_nic_access: wake the NIC to be able to access non-HBUS regs. + * Sleeping is not allowed between grab_nic_access and + * release_nic_access. + * @release_nic_access: let the NIC go to sleep. The "flags" parameter + * must be the same one that was sent before to the grab_nic_access. + * @set_bits_mask - set SRAM register according to value and mask. 
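For reference, a minimal sketch of how a caller is expected to use the new grab/release pair documented here, modelled on the converted iwl_test_indirect_read() hunk earlier in this patch. The helper name iwl_dump_prph_words() is illustrative only and not part of the patch; the usual iwlwifi headers (iwl-trans.h, iwl-io.h, iwl-csr.h) are assumed:

static int iwl_dump_prph_words(struct iwl_trans *trans, u32 addr,
                               u32 *buf, int dwords)
{
        unsigned long flags;
        int i;

        /* wake the NIC; on failure bail out, as iwl_test_indirect_read() does */
        if (!iwl_trans_grab_nic_access(trans, false, &flags))
                return -EIO;

        /* no sleeping is allowed until release_nic_access() */
        iwl_write32(trans, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
        for (i = 0; i < dwords; i++)
                buf[i] = iwl_read32(trans, HBUS_TARG_PRPH_RDAT);

        /* pass back the same flags cookie that grab_nic_access() filled in */
        iwl_trans_release_nic_access(trans, &flags);
        return 0;
}

The point of the change is that the transport now owns the locking (trans->reg_lock is removed below), so callers only deal with the opaque flags cookie instead of taking the spinlock themselves.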
*/ struct iwl_trans_ops { int (*start_hw)(struct iwl_trans *iwl_trans); void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving); - int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw); + int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, + bool run_in_rfkill); void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); void (*stop_device)(struct iwl_trans *trans); - void (*wowlan_suspend)(struct iwl_trans *trans); + void (*d3_suspend)(struct iwl_trans *trans); + int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); @@ -424,9 +458,19 @@ struct iwl_trans_ops { u32 (*read32)(struct iwl_trans *trans, u32 ofs); u32 (*read_prph)(struct iwl_trans *trans, u32 ofs); void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val); + int (*read_mem)(struct iwl_trans *trans, u32 addr, + void *buf, int dwords); + int (*write_mem)(struct iwl_trans *trans, u32 addr, + void *buf, int dwords); void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); + bool (*grab_nic_access)(struct iwl_trans *trans, bool silent, + unsigned long *flags); + void (*release_nic_access)(struct iwl_trans *trans, + unsigned long *flags); + void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, + u32 value); }; /** @@ -446,7 +490,6 @@ enum iwl_trans_state { * @ops - pointer to iwl_trans_ops * @op_mode - pointer to the op_mode * @cfg - pointer to the configuration - * @reg_lock - protect hw register access * @dev - pointer to struct device * that represents the device * @hw_id: a u32 with the ID of the device / subdevice. * Set during transport allocation. @@ -467,7 +510,6 @@ struct iwl_trans { struct iwl_op_mode *op_mode; const struct iwl_cfg *cfg; enum iwl_trans_state state; - spinlock_t reg_lock; struct device *dev; u32 hw_rev; @@ -485,6 +527,10 @@ struct iwl_trans { struct dentry *dbgfs_dir; +#ifdef CONFIG_LOCKDEP + struct lockdep_map sync_cmd_lockdep_map; +#endif + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); @@ -528,13 +574,14 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) } static inline int iwl_trans_start_fw(struct iwl_trans *trans, - const struct fw_img *fw) + const struct fw_img *fw, + bool run_in_rfkill) { might_sleep(); WARN_ON_ONCE(!trans->rx_mpdu_cmd); - return trans->ops->start_fw(trans, fw); + return trans->ops->start_fw(trans, fw, run_in_rfkill); } static inline void iwl_trans_stop_device(struct iwl_trans *trans) @@ -546,19 +593,36 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans) trans->state = IWL_TRANS_NO_FW; } -static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans) +static inline void iwl_trans_d3_suspend(struct iwl_trans *trans) { might_sleep(); - trans->ops->wowlan_suspend(trans); + trans->ops->d3_suspend(trans); +} + +static inline int iwl_trans_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status) +{ + might_sleep(); + return trans->ops->d3_resume(trans, status); } static inline int iwl_trans_send_cmd(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) + struct iwl_host_cmd *cmd) { + int ret; + WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "%s bad state = %d", __func__, trans->state); - return trans->ops->send_cmd(trans, cmd); + if (!(cmd->flags & CMD_ASYNC)) + 
lock_map_acquire_read(&trans->sync_cmd_lockdep_map); + + ret = trans->ops->send_cmd(trans, cmd); + + if (!(cmd->flags & CMD_ASYNC)) + lock_map_release(&trans->sync_cmd_lockdep_map); + + return ret; } static inline struct iwl_device_cmd * @@ -636,7 +700,7 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) } static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) + struct dentry *dir) { return trans->ops->dbgfs_register(trans, dir); } @@ -679,15 +743,77 @@ static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, return trans->ops->write_prph(trans, ofs, val); } +static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + return trans->ops->read_mem(trans, addr, buf, dwords); +} + +#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ + do { \ + if (__builtin_constant_p(bufsize)) \ + BUILD_BUG_ON((bufsize) % sizeof(u32)); \ + iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ + } while (0) + +static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) +{ + u32 value; + + if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) + return 0xa5a5a5a5; + + return value; +} + +static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + return trans->ops->write_mem(trans, addr, buf, dwords); +} + +static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, + u32 val) +{ + return iwl_trans_write_mem(trans, addr, &val, 1); +} + static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) { trans->ops->set_pmi(trans, state); } +static inline void +iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) +{ + trans->ops->set_bits_mask(trans, reg, mask, value); +} + +#define iwl_trans_grab_nic_access(trans, silent, flags) \ + __cond_lock(nic_access, \ + likely((trans)->ops->grab_nic_access(trans, silent, flags))) + +static inline void __releases(nic_access) +iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags) +{ + trans->ops->release_nic_access(trans, flags); + __release(nic_access); +} + /***************************************************** * driver (transport) register/unregister functions ******************************************************/ int __must_check iwl_pci_register_driver(void); void iwl_pci_unregister_driver(void); +static inline void trans_lockdep_init(struct iwl_trans *trans) +{ +#ifdef CONFIG_LOCKDEP + static struct lock_class_key __key; + + lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", + &__key, 0); +#endif +} + #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile new file mode 100644 index 000000000000..807b250ec396 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_IWLMVM) += iwlmvm.o +iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o +iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o +iwlmvm-y += scan.o time-event.o rs.o +iwlmvm-y += power.o +iwlmvm-y += led.o +iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o +iwlmvm-$(CONFIG_PM_SLEEP) += d3.o + +ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c new file mode 100644 index 000000000000..73d24aacb90a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/binding.c @@ -0,0 +1,197 @@ 
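Before the new mvm files begin, a short illustrative sketch (not part of the patch) of the SRAM read helpers defined just above in iwl-trans.h. The struct and function names here are hypothetical; the pattern mirrors the error_table_start read that mvm/d3.c performs further down:

struct example_err_info {
        u32 valid;
        u32 error_id;
};

static u32 example_read_fw_error_id(struct iwl_trans *trans, u32 base)
{
        struct example_err_info info;

        /* sizeof(info) must be a multiple of 4; the macro BUILD_BUG_ONs otherwise */
        iwl_trans_read_mem_bytes(trans, base, &info, sizeof(info));
        if (!info.valid)
                return 0;

        /*
         * Equivalent single-DWORD read of the same field, shown for
         * comparison; it returns 0xa5a5a5a5 (and WARNs) if read_mem fails.
         */
        return iwl_trans_read_mem32(trans, base +
                                    offsetof(struct example_err_info, error_id));
}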
+/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include <net/mac80211.h> +#include "fw-api.h" +#include "mvm.h" + +struct iwl_mvm_iface_iterator_data { + struct ieee80211_vif *ignore_vif; + int idx; + + struct iwl_mvm_phy_ctxt *phyctxt; + + u16 ids[MAX_MACS_IN_BINDING]; + u16 colors[MAX_MACS_IN_BINDING]; +}; + +static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, + struct iwl_mvm_iface_iterator_data *data) +{ + struct iwl_binding_cmd cmd; + struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt; + int i, ret; + u32 status; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, + phyctxt->color)); + cmd.action = cpu_to_le32(action); + cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, + phyctxt->color)); + + for (i = 0; i < MAX_MACS_IN_BINDING; i++) + cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); + for (i = 0; i < data->idx; i++) + cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i], + data->colors[i])); + + status = 0; + ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, + sizeof(cmd), &cmd, &status); + if (ret) { + IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", + action, ret); + return ret; + } + + if (status) { + IWL_ERR(mvm, "Binding command failed: %u\n", status); + ret = -EIO; + } + + return ret; +} + +static void iwl_mvm_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_iface_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif == data->ignore_vif) + return; + + if (mvmvif->phy_ctxt != data->phyctxt) + return; + + if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) + return; + + data->ids[data->idx] = mvmvif->id; + data->colors[data->idx] = mvmvif->color; + data->idx++; +} + +static int iwl_mvm_binding_update(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *phyctxt, + bool add) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_iface_iterator_data data = { + .ignore_vif = vif, + .phyctxt = phyctxt, + }; + u32 action = FW_CTXT_ACTION_MODIFY; + + lockdep_assert_held(&mvm->mutex); + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_iface_iterator, + &data); + + /* + * If there are no other interfaces yet we + * need to create a new binding. 
+ */ + if (data.idx == 0) { + if (add) + action = FW_CTXT_ACTION_ADD; + else + action = FW_CTXT_ACTION_REMOVE; + } + + if (add) { + if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING)) + return -EINVAL; + + data.ids[data.idx] = mvmvif->id; + data.colors[data.idx] = mvmvif->color; + data.idx++; + } + + return iwl_mvm_binding_cmd(mvm, action, &data); +} + +int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + return -EINVAL; + + return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true); +} + +int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + return -EINVAL; + + return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c new file mode 100644 index 000000000000..c64d864799cd --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -0,0 +1,955 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <net/cfg80211.h> +#include <net/ipv6.h> +#include "iwl-modparams.h" +#include "fw-api.h" +#include "mvm.h" + +void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (iwlwifi_mod_params.sw_crypto) + return; + + mutex_lock(&mvm->mutex); + + memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); + memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); + mvmvif->rekey_data.replay_ctr = + cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + mvmvif->rekey_data.valid = true; + + mutex_unlock(&mvm->mutex); +} + +#if IS_ENABLED(CONFIG_IPV6) +void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct inet6_ifaddr *ifa; + int idx = 0; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + mvmvif->target_ipv6_addrs[idx] = ifa->addr; + idx++; + if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS) + break; + } + read_unlock_bh(&idev->lock); + + mvmvif->num_target_ipv6_addrs = idx; +} +#endif + +void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int idx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->tx_key_idx = idx; +} + +static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) +{ + int i; + + for (i = 0; i < IWL_P1K_SIZE; i++) + out[i] = cpu_to_le16(p1k[i]); +} + +struct wowlan_key_data { + struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; + struct iwl_wowlan_tkip_params_cmd *tkip; + bool error, use_rsc_tsc, use_tkip; + int gtk_key_idx; +}; + +static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct wowlan_key_data *data = _data; + struct aes_sc *aes_sc, *aes_tx_sc = NULL; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct iwl_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWL_P1K_SIZE]; + int ret, i; + + mutex_lock(&mvm->mutex); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ + struct { + struct iwl_mvm_wep_key_cmd wep_key_cmd; + struct iwl_mvm_wep_key wep_key; + } __packed wkc = { + .wep_key_cmd.mac_id_n_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .wep_key_cmd.num_keys = 1, + /* firmware sets STA_KEY_FLG_WEP_13BYTES */ + .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, + .wep_key.key_index = key->keyidx, + .wep_key.key_size = key->keylen, + }; + + /* + * This will 
fail -- the key functions don't set support + * pairwise WEP keys. However, that's better than silently + * failing WoWLAN. Or maybe not? + */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + break; + + memcpy(&wkc.wep_key.key[3], key->key, key->keylen); + if (key->keyidx == mvmvif->tx_key_idx) { + /* TX key must be at offset 0 */ + wkc.wep_key.key_offset = 0; + } else { + /* others start at 1 */ + data->gtk_key_idx++; + wkc.wep_key.key_offset = data->gtk_key_idx; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC, + sizeof(wkc), &wkc); + data->error = ret != 0; + + /* don't upload key again */ + goto out_unlock; + } + default: + data->error = true; + goto out_unlock; + case WLAN_CIPHER_SUITE_AES_CMAC: + /* + * Ignore CMAC keys -- the WoWLAN firmware doesn't support them + * but we also shouldn't abort suspend due to that. It does have + * support for the IGTK key renewal, but doesn't really use the + * IGTK for anything. This means we could spuriously wake up or + * be deauthenticated, but that was considered acceptable. + */ + goto out_unlock; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; + + rx_p1ks = data->tkip->rx_uni; + + ieee80211_get_key_tx_seq(key, &seq); + tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + + ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); + + memcpy(data->tkip->mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + rx_mic_key = data->tkip->mic_keys.rx_unicast; + } else { + tkip_sc = + data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; + rx_p1ks = data->tkip->rx_multi; + rx_mic_key = data->tkip->mic_keys.rx_mcast; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWL_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32 + 1, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); + + memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + data->use_tkip = true; + data->use_rsc_tsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (sta) { + u8 *pn = seq.ccmp.pn; + + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; + + ieee80211_get_key_tx_seq(key, &seq); + aes_tx_sc->pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } else { + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 for checking the IV in the frames. 
+ */ + for (i = 0; i < IWL_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc->pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + data->use_rsc_tsc = true; + break; + } + + /* + * The D3 firmware hardcodes the key offset 0 as the key it uses + * to transmit packets to the AP, i.e. the PTK. + */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + key->hw_key_idx = 0; + } else { + data->gtk_key_idx++; + key->hw_key_idx = data->gtk_key_idx; + } + + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); + data->error = ret != 0; +out_unlock: + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan) +{ + struct iwl_wowlan_patterns_cmd *pattern_cmd; + struct iwl_host_cmd cmd = { + .id = WOWLAN_PATTERNS, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .flags = CMD_SYNC, + }; + int i, err; + + if (!wowlan->n_patterns) + return 0; + + cmd.len[0] = sizeof(*pattern_cmd) + + wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern); + + pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); + if (!pattern_cmd) + return -ENOMEM; + + pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); + + for (i = 0; i < wowlan->n_patterns; i++) { + int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); + + memcpy(&pattern_cmd->patterns[i].mask, + wowlan->patterns[i].mask, mask_len); + memcpy(&pattern_cmd->patterns[i].pattern, + wowlan->patterns[i].pattern, + wowlan->patterns[i].pattern_len); + pattern_cmd->patterns[i].mask_size = mask_len; + pattern_cmd->patterns[i].pattern_size = + wowlan->patterns[i].pattern_len; + } + + cmd.data[0] = pattern_cmd; + err = iwl_mvm_send_cmd(mvm, &cmd); + kfree(pattern_cmd); + return err; +} + +static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_proto_offload_cmd cmd = {}; +#if IS_ENABLED(CONFIG_IPV6) + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int i; + + if (mvmvif->num_target_ipv6_addrs) { + cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS); + memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN); + } + + BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) != + sizeof(mvmvif->target_ipv6_addrs[i])); + + for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++) + memcpy(cmd.target_ipv6_addr[i], + &mvmvif->target_ipv6_addrs[i], + sizeof(cmd.target_ipv6_addr[i])); +#endif + + if (vif->bss_conf.arp_addr_cnt) { + cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP); + cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; + memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN); + } + + if (!cmd.enabled) + return 0; + + return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC, + sizeof(cmd), &cmd); +} + +struct iwl_d3_iter_data { + struct iwl_mvm *mvm; + struct ieee80211_vif *vif; + bool error; +}; + +static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_d3_iter_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return; + + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) + return; + + if (data->vif) { + IWL_ERR(data->mvm, "More than one managed interface active!\n"); + data->error = true; + return; + } + + data->vif = vif; +} + +static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *ap_sta) +{ + struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_chanctx_conf *ctx; + u8 chains_static, chains_dynamic; + struct cfg80211_chan_def chandef; + int ret, i; + struct iwl_binding_cmd binding_cmd = {}; + struct iwl_time_quota_cmd quota_cmd = {}; + u32 status; + + /* add back the PHY */ + if (WARN_ON(!mvmvif->phy_ctxt)) + return -EINVAL; + + rcu_read_lock(); + ctx = rcu_dereference(vif->chanctx_conf); + if (WARN_ON(!ctx)) { + rcu_read_unlock(); + return -EINVAL; + } + chandef = ctx->def; + chains_static = ctx->rx_chains_static; + chains_dynamic = ctx->rx_chains_dynamic; + rcu_read_unlock(); + + ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, + chains_static, chains_dynamic); + if (ret) + return ret; + + /* add back the MAC */ + mvmvif->uploaded = false; + + if (WARN_ON(!vif->bss_conf.assoc)) + return -EINVAL; + /* hack */ + vif->bss_conf.assoc = false; + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + vif->bss_conf.assoc = true; + if (ret) + return ret; + + /* add back binding - XXX refactor? */ + binding_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, + mvmvif->phy_ctxt->color)); + binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); + binding_cmd.phy = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, + mvmvif->phy_ctxt->color)); + binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + for (i = 1; i < MAX_MACS_IN_BINDING; i++) + binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); + + status = 0; + ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, + sizeof(binding_cmd), &binding_cmd, + &status); + if (ret) { + IWL_ERR(mvm, "Failed to add binding: %d\n", ret); + return ret; + } + + if (status) { + IWL_ERR(mvm, "Binding command failed: %u\n", status); + return -EIO; + } + + ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false); + if (ret) + return ret; + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); + + ret = iwl_mvm_mac_ctxt_changed(mvm, vif); + if (ret) + return ret; + + /* and some quota */ + quota_cmd.quotas[0].id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, + mvmvif->phy_ctxt->color)); + quota_cmd.quotas[0].quota = cpu_to_le32(100); + quota_cmd.quotas[0].max_duration = cpu_to_le32(1000); + + for (i = 1; i < MAX_BINDINGS; i++) + quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); + + ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, + sizeof(quota_cmd), "a_cmd); + if (ret) + IWL_ERR(mvm, "Failed to send quota: %d\n", ret); + + return 0; +} + +int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_d3_iter_data suspend_iter_data = { + .mvm = mvm, + }; + struct ieee80211_vif *vif; + struct iwl_mvm_vif *mvmvif; + struct ieee80211_sta *ap_sta; + struct iwl_mvm_sta *mvm_ap_sta; + struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; + struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; + struct iwl_d3_manager_config d3_cfg_cmd = {}; + struct wowlan_key_data key_data = { + .use_rsc_tsc = false, + .tkip = &tkip_cmd, + .use_tkip = false, + }; + int ret, i; + u16 seq; + u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT; + + if (WARN_ON(!wowlan)) + return -EINVAL; + + key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + old_aux_sta_id = mvm->aux_sta.sta_id; + + /* see if there's only a single BSS vif and it's associated */ + 
ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_iface_iterator, &suspend_iter_data); + + if (suspend_iter_data.error || !suspend_iter_data.vif) { + ret = 1; + goto out_noreset; + } + + vif = suspend_iter_data.vif; + mvmvif = iwl_mvm_vif_from_mac80211(vif); + + ap_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(ap_sta)) { + ret = -EINVAL; + goto out_noreset; + } + + mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; + + /* + * The D3 firmware still hardcodes the AP station ID for the + * BSS we're associated with as 0. Store the real STA ID here + * and assign 0. When we leave this function, we'll restore + * the original value for the resume code. + */ + old_ap_sta_id = mvm_ap_sta->sta_id; + mvm_ap_sta->sta_id = 0; + mvmvif->ap_sta_id = 0; + + /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */ + + wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported; + + /* + * We know the last used seqno, and the uCode expects to know that + * one, it will increment before TX. + */ + seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ; + wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq); + + /* + * For QoS counters, we store the one to use next, so subtract 0x10 + * since the uCode will add 0x10 *before* using the value while we + * increment after using the value (i.e. store the next value to use). + */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + seq = mvm_ap_sta->tid_data[i].seq_number; + seq -= 0x10; + wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq); + } + + if (wowlan->disconnect) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE); + if (wowlan->magic_pkt) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); + if (wowlan->gtk_rekey_failure) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); + if (wowlan->eap_identity_req) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); + if (wowlan->four_way_handshake) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); + if (wowlan->n_patterns) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); + + if (wowlan->rfkill_release) + d3_cfg_cmd.wakeup_flags |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); + + iwl_mvm_cancel_scan(mvm); + + iwl_trans_stop_device(mvm->trans); + + /* + * Set the HW restart bit -- this is mostly true as we're + * going to load new firmware and reprogram that, though + * the reprogramming is going to be manual to avoid adding + * all the MACs that aren't support. + * We don't have to clear up everything though because the + * reprogramming is manual. When we resume, we'll actually + * go through a proper restart sequence again to switch + * back to the runtime firmware image. + */ + set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + + /* We reprogram keys and shouldn't allocate new key indices */ + memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); + + /* + * The D3 firmware still hardcodes the AP station ID for the + * BSS we're associated with as 0. As a result, we have to move + * the auxiliary station to ID 1 so the ID 0 remains free for + * the AP station for later. + * We set the sta_id to 1 here, and reset it to its previous + * value (that we stored above) later. 
+ */ + mvm->aux_sta.sta_id = 1; + + ret = iwl_mvm_load_d3_fw(mvm); + if (ret) + goto out; + + ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); + if (ret) + goto out; + + if (!iwlwifi_mod_params.sw_crypto) { + /* + * This needs to be unlocked due to lock ordering + * constraints. Since we're in the suspend path + * that isn't really a problem though. + */ + mutex_unlock(&mvm->mutex); + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_program_keys, + &key_data); + mutex_lock(&mvm->mutex); + if (key_data.error) { + ret = -EIO; + goto out; + } + + if (key_data.use_rsc_tsc) { + struct iwl_host_cmd rsc_tsc_cmd = { + .id = WOWLAN_TSC_RSC_PARAM, + .flags = CMD_SYNC, + .data[0] = key_data.rsc_tsc, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .len[0] = sizeof(*key_data.rsc_tsc), + }; + + ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd); + if (ret) + goto out; + } + + if (key_data.use_tkip) { + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_TKIP_PARAM, + CMD_SYNC, sizeof(tkip_cmd), + &tkip_cmd); + if (ret) + goto out; + } + + if (mvmvif->rekey_data.valid) { + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); + memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, + NL80211_KCK_LEN); + kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, + NL80211_KEK_LEN); + kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; + + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_KEK_KCK_MATERIAL, + CMD_SYNC, + sizeof(kek_kck_cmd), + &kek_kck_cmd); + if (ret) + goto out; + } + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, + CMD_SYNC, sizeof(wowlan_config_cmd), + &wowlan_config_cmd); + if (ret) + goto out; + + ret = iwl_mvm_send_patterns(mvm, wowlan); + if (ret) + goto out; + + ret = iwl_mvm_send_proto_offload(mvm, vif); + if (ret) + goto out; + + /* must be last -- this switches firmware state */ + ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC, + sizeof(d3_cfg_cmd), &d3_cfg_cmd); + if (ret) + goto out; + + clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + + iwl_trans_d3_suspend(mvm->trans); + out: + mvm->aux_sta.sta_id = old_aux_sta_id; + mvm_ap_sta->sta_id = old_ap_sta_id; + mvmvif->ap_sta_id = old_ap_sta_id; + out_noreset: + kfree(key_data.rsc_tsc); + if (ret < 0) + ieee80211_restart_hw(mvm->hw); + + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 base = mvm->error_event_table; + struct error_table_start { + /* cf. 
struct iwl_error_event_table */ + u32 valid; + u32 error_id; + } err_info; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + struct iwl_host_cmd cmd = { + .id = WOWLAN_GET_STATUSES, + .flags = CMD_SYNC | CMD_WANT_SKB, + }; + struct iwl_wowlan_status *status; + u32 reasons; + int ret, len; + bool pkt8023 = false; + struct sk_buff *pkt = NULL; + + iwl_trans_read_mem_bytes(mvm->trans, base, + &err_info, sizeof(err_info)); + + if (err_info.valid) { + IWL_INFO(mvm, "error table is valid (%d)\n", + err_info.valid); + if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + wakeup.rfkill_release = true; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + return; + } + + /* only for tracing for now */ + ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL); + if (ret) + IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, "failed to query status (%d)\n", ret); + return; + } + + /* RF-kill already asserted again... */ + if (!cmd.resp_pkt) + return; + + len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + goto out; + } + + status = (void *)cmd.resp_pkt->data; + + if (len - sizeof(struct iwl_cmd_header) != + sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + goto out; + } + + reasons = le32_to_cpu(status->wakeup_reasons); + + if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { + wakeup_report = NULL; + goto report; + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) { + wakeup.magic_pkt = true; + pkt8023 = true; + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) { + wakeup.pattern_idx = + le16_to_cpu(status->pattern_number); + pkt8023 = true; + } + + if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) + wakeup.disconnect = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) { + wakeup.gtk_rekey_failure = true; + pkt8023 = true; + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) { + wakeup.rfkill_release = true; + pkt8023 = true; + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) { + wakeup.eap_identity_req = true; + pkt8023 = true; + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) { + wakeup.four_way_handshake = true; + pkt8023 = true; + } + + if (status->wake_packet_bufsize) { + u32 pktsize = le32_to_cpu(status->wake_packet_bufsize); + u32 pktlen = le32_to_cpu(status->wake_packet_length); + + if (pkt8023) { + pkt = alloc_skb(pktsize, GFP_KERNEL); + if (!pkt) + goto report; + memcpy(skb_put(pkt, pktsize), status->wake_packet, + pktsize); + if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) + goto report; + wakeup.packet = pkt->data; + wakeup.packet_present_len = pkt->len; + wakeup.packet_len = pkt->len - (pktlen - pktsize); + wakeup.packet_80211 = false; + } else { + wakeup.packet = status->wake_packet; + wakeup.packet_present_len = pktsize; + wakeup.packet_len = pktlen; + wakeup.packet_80211 = true; + } + } + + report: + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + kfree_skb(pkt); + + out: + iwl_free_resp(&cmd); +} + +int iwl_mvm_resume(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_d3_iter_data resume_iter_data = { + .mvm = mvm, + 
}; + struct ieee80211_vif *vif = NULL; + int ret; + enum iwl_d3_status d3_status; + + mutex_lock(&mvm->mutex); + + /* get the BSS vif pointer again */ + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_iface_iterator, &resume_iter_data); + + if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif)) + goto out_unlock; + + vif = resume_iter_data.vif; + + ret = iwl_trans_d3_resume(mvm->trans, &d3_status); + if (ret) + goto out_unlock; + + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(mvm, "Device was reset during suspend\n"); + goto out_unlock; + } + + iwl_mvm_query_wakeup_reasons(mvm, vif); + + out_unlock: + mutex_unlock(&mvm->mutex); + + if (vif) + ieee80211_resume_disconnect(vif); + + /* return 1 to reconfigure the device */ + set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + return 1; +} + +void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + device_set_wakeup_enable(mvm->trans->dev, enabled); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c new file mode 100644 index 000000000000..c1bdb5582126 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -0,0 +1,378 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "mvm.h" +#include "sta.h" +#include "iwl-io.h" + +struct iwl_dbgfs_mvm_ctx { + struct iwl_mvm *mvm; + struct ieee80211_vif *vif; +}; + +static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t iwl_dbgfs_tx_flush_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + + char buf[16]; + int buf_size, ret; + u32 scd_q_msk; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &scd_q_msk) != 1) + return -EINVAL; + + IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count; + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t iwl_dbgfs_sta_drain_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct ieee80211_sta *sta; + + char buf[8]; + int buf_size, sta_id, drain, ret; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + ret = -ENOENT; + else + ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? 
: + count; + + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + const struct fw_img *img; + int ofs, len, pos = 0; + size_t bufsz, ret; + char *buf; + u8 *ptr; + + /* default is to dump the entire data segment */ + if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) { + mvm->dbgfs_sram_offset = 0x800000; + if (!mvm->ucode_loaded) + return -EINVAL; + img = &mvm->fw->img[mvm->cur_ucode]; + mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + } + len = mvm->dbgfs_sram_len; + + bufsz = len * 4 + 256; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ptr = kzalloc(len, GFP_KERNEL); + if (!ptr) { + kfree(buf); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + mvm->dbgfs_sram_offset); + + iwl_trans_read_mem_bytes(mvm->trans, + mvm->dbgfs_sram_offset, + ptr, len); + for (ofs = 0; ofs < len; ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, + bufsz - pos, false); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + + kfree(buf); + kfree(ptr); + + return ret; +} + +static ssize_t iwl_dbgfs_sram_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + if ((offset & 0x3) || (len & 0x3)) + return -EINVAL; + mvm->dbgfs_sram_offset = offset; + mvm->dbgfs_sram_len = len; + } else { + mvm->dbgfs_sram_offset = 0; + mvm->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct ieee80211_sta *sta; + char buf[400]; + int i, pos = 0, bufsz = sizeof(buf); + + mutex_lock(&mvm->mutex); + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (!sta) + pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); + else if (IS_ERR(sta)) + pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", + PTR_ERR(sta)); + else + pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", + sta->addr); + } + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[8] = {}; + int allow; + + if (!mvm->ucode_loaded) + return -EIO; + + if (copy_from_user(buf, user_buf, sizeof(buf))) + return -EFAULT; + + if (sscanf(buf, "%d", &allow) != 1) + return -EINVAL; + + IWL_DEBUG_POWER(mvm, "%s device power down\n", + allow ? 
"allow" : "prevent"); + + /* + * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it + */ + + return count; +} + +static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[8] = {}; + int allow; + + if (copy_from_user(buf, user_buf, sizeof(buf))) + return -EFAULT; + + if (sscanf(buf, "%d", &allow) != 1) + return -EINVAL; + + IWL_DEBUG_POWER(mvm, "%s device power down in d3\n", + allow ? "allow" : "prevent"); + + /* + * TODO: When WoWLAN FW alive notification happens, driver will send + * REPLY_DEBUG_CMD setting power_down_allow flag according to + * mvm->prevent_power_down_d3 + */ + mvm->prevent_power_down_d3 = !allow; + + return count; +} + +#define MVM_DEBUGFS_READ_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = iwl_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = iwl_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .open = iwl_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, mvm, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ + } while (0) + +#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, vif, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ + } while (0) + +/* Device wide debugfs entries */ +MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush); +MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram); +MVM_DEBUGFS_READ_FILE_OPS(stations); +MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); +MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); + +int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) +{ + char buf[100]; + + mvm->debugfs_dir = dbgfs_dir; + + MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); + + /* + * Create a symlink with mac80211. It will be removed when mac80211 + * exists (before the opmode exists which removes the target.) + */ + snprintf(buf, 100, "../../%s/%s", + dbgfs_dir->d_parent->d_parent->d_name.name, + dbgfs_dir->d_parent->d_name.name); + if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf)) + goto err; + + return 0; +err: + IWL_ERR(mvm, "Can't create the mvm debugfs directory\n"); + return -ENOMEM; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h new file mode 100644 index 000000000000..cf6f9a02fb74 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -0,0 +1,282 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_d3_h__ +#define __fw_api_d3_h__ + +/** + * enum iwl_d3_wakeup_flags - D3 manager wakeup flags + * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert + */ +enum iwl_d3_wakeup_flags { + IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0), +}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */ + +/** + * struct iwl_d3_manager_config - D3 manager configuration command + * @min_sleep_time: minimum sleep time (in usec) + * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags + * + * The structure is used for the D3_CONFIG_CMD command. 
+ */ +struct iwl_d3_manager_config { + __le32 min_sleep_time; + __le32 wakeup_flags; +} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */ + + +/* TODO: OFFLOADS_QUERY_API_S_VER_1 */ + +/** + * enum iwl_d3_proto_offloads - enabled protocol offloads + * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled + * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled + */ +enum iwl_proto_offloads { + IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), + IWL_D3_PROTO_OFFLOAD_NS = BIT(1), +}; + +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2 + +/** + * struct iwl_proto_offload_cmd - ARP/NS offload configuration + * @enabled: enable flags + * @remote_ipv4_addr: remote address to answer to (or zero if all) + * @host_ipv4_addr: our IPv4 address to respond to queries for + * @arp_mac_addr: our MAC address for ARP responses + * @remote_ipv6_addr: remote address to answer to (or zero if all) + * @solicited_node_ipv6_addr: broken -- solicited node address exists + * for each target address + * @target_ipv6_addr: our target addresses + * @ndp_mac_addr: neighbor soliciation response MAC address + */ +struct iwl_proto_offload_cmd { + __le32 enabled; + __be32 remote_ipv4_addr; + __be32 host_ipv4_addr; + u8 arp_mac_addr[ETH_ALEN]; + __le16 reserved1; + + u8 remote_ipv6_addr[16]; + u8 solicited_node_ipv6_addr[16]; + u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16]; + u8 ndp_mac_addr[ETH_ALEN]; + __le16 reserved2; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ + + +/* + * WOWLAN_PATTERNS + */ +#define IWL_WOWLAN_MIN_PATTERN_LEN 16 +#define IWL_WOWLAN_MAX_PATTERN_LEN 128 + +struct iwl_wowlan_pattern { + u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN]; + u8 mask_size; + u8 pattern_size; + __le16 reserved; +} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */ + +#define IWL_WOWLAN_MAX_PATTERNS 20 + +struct iwl_wowlan_patterns_cmd { + __le32 n_patterns; + struct iwl_wowlan_pattern patterns[]; +} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ + +enum iwl_wowlan_wakeup_filters { + IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), + IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), + IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), + IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), + IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), + IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), + IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), + IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7), + IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), + IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), + IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), + /* BIT(11) reserved */ + IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), +}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ + +struct iwl_wowlan_config_cmd { + __le32 wakeup_filter; + __le16 non_qos_seq; + __le16 qos_seq[8]; + u8 wowlan_ba_teardown_tids; + u8 is_11n_connection; +} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */ + +/* + * WOWLAN_TSC_RSC_PARAMS + */ +#define IWL_NUM_RSC 16 + +struct tkip_sc { + __le16 iv16; + __le16 pad; + __le32 iv32; +} __packed; /* TKIP_SC_API_U_VER_1 */ + +struct iwl_tkip_rsc_tsc { + struct tkip_sc unicast_rsc[IWL_NUM_RSC]; + struct tkip_sc multicast_rsc[IWL_NUM_RSC]; + struct tkip_sc tsc; +} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */ + +struct aes_sc { + __le64 pn; +} __packed; /* TKIP_AES_SC_API_U_VER_1 */ + +struct iwl_aes_rsc_tsc { + struct aes_sc unicast_rsc[IWL_NUM_RSC]; + struct aes_sc multicast_rsc[IWL_NUM_RSC]; + struct aes_sc tsc; +} __packed; /* AES_TSC_RSC_API_S_VER_1 */ + +union iwl_all_tsc_rsc { + struct iwl_tkip_rsc_tsc tkip; + struct iwl_aes_rsc_tsc aes; +}; /* ALL_TSC_RSC_API_S_VER_2 */ + 
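/*
 * Illustrative sketch only, not part of this patch: one way a host driver
 * might build the WOWLAN_PATTERNS payload declared above from raw
 * pattern/mask pairs. It assumes one mask bit per pattern byte, which is
 * what the mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8] sizing implies. The helper
 * name and its arguments are hypothetical; it relies on the declarations
 * above plus standard kernel helpers (kzalloc, DIV_ROUND_UP, cpu_to_le32).
 */
static struct iwl_wowlan_patterns_cmd *
example_build_wowlan_patterns(const u8 *const *patterns,
			      const u8 *const *masks,
			      const u8 *pattern_lens, int n_patterns)
{
	struct iwl_wowlan_patterns_cmd *cmd;
	size_t len;
	int i;

	if (n_patterns > IWL_WOWLAN_MAX_PATTERNS)
		return NULL;

	/* fixed header followed by n_patterns flexible-array entries */
	len = sizeof(*cmd) + n_patterns * sizeof(struct iwl_wowlan_pattern);
	cmd = kzalloc(len, GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->n_patterns = cpu_to_le32(n_patterns);

	for (i = 0; i < n_patterns; i++) {
		struct iwl_wowlan_pattern *pat = &cmd->patterns[i];
		u8 plen = pattern_lens[i];

		if (plen < IWL_WOWLAN_MIN_PATTERN_LEN ||
		    plen > IWL_WOWLAN_MAX_PATTERN_LEN)
			continue;	/* real code would reject the request */

		/* one mask bit covers one pattern byte */
		memcpy(pat->mask, masks[i], DIV_ROUND_UP(plen, 8));
		memcpy(pat->pattern, patterns[i], plen);
		pat->mask_size = DIV_ROUND_UP(plen, 8);
		pat->pattern_size = plen;
	}

	/* caller would send this as WOWLAN_PATTERNS and then free it */
	return cmd;
}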
+struct iwl_wowlan_rsc_tsc_params_cmd { + union iwl_all_tsc_rsc all_tsc_rsc; +} __packed; /* ALL_TSC_RSC_API_S_VER_2 */ + +#define IWL_MIC_KEY_SIZE 8 +struct iwl_mic_keys { + u8 tx[IWL_MIC_KEY_SIZE]; + u8 rx_unicast[IWL_MIC_KEY_SIZE]; + u8 rx_mcast[IWL_MIC_KEY_SIZE]; +} __packed; /* MIC_KEYS_API_S_VER_1 */ + +#define IWL_P1K_SIZE 5 +struct iwl_p1k_cache { + __le16 p1k[IWL_P1K_SIZE]; +} __packed; + +#define IWL_NUM_RX_P1K_CACHE 2 + +struct iwl_wowlan_tkip_params_cmd { + struct iwl_mic_keys mic_keys; + struct iwl_p1k_cache tx; + struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; + struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; +} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ + +#define IWL_KCK_MAX_SIZE 32 +#define IWL_KEK_MAX_SIZE 32 + +struct iwl_wowlan_kek_kck_material_cmd { + u8 kck[IWL_KCK_MAX_SIZE]; + u8 kek[IWL_KEK_MAX_SIZE]; + __le16 kck_len; + __le16 kek_len; + __le64 replay_ctr; +} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ + +#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 + +enum iwl_wowlan_rekey_status { + IWL_WOWLAN_REKEY_POST_REKEY = 0, + IWL_WOWLAN_REKEY_WHILE_REKEY = 1, +}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */ + +enum iwl_wowlan_wakeup_reason { + IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, + IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0), + IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1), + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2), + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3), + IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4), + IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5), + IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6), + IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7), + IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), +}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ + +struct iwl_wowlan_status { + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 rekey_status; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 wake_packet[]; /* can be truncated from _length to _bufsize */ +} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */ + +/* TODO: NetDetect API */ + +#endif /* __fw_api_d3_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h new file mode 100644 index 000000000000..ae39b7dfda7b --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h @@ -0,0 +1,369 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_mac_h__ +#define __fw_api_mac_h__ + +/* + * The first MAC indices (starting from 0) + * are available to the driver, AUX follows + */ +#define MAC_INDEX_AUX 4 +#define MAC_INDEX_MIN_DRIVER 0 +#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX + +#define AC_NUM 4 /* Number of access categories */ + +/** + * enum iwl_mac_protection_flags - MAC context flags + * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, + * this will require CCK RTS/CTS2self. + * RTS/CTS will protect full burst time. + * @MAC_PROT_FLG_HT_PROT: enable HT protection + * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions + * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self + */ +enum iwl_mac_protection_flags { + MAC_PROT_FLG_TGG_PROTECT = BIT(3), + MAC_PROT_FLG_HT_PROT = BIT(23), + MAC_PROT_FLG_FAT_PROT = BIT(24), + MAC_PROT_FLG_SELF_CTS_EN = BIT(30), +}; + +#define MAC_FLG_SHORT_SLOT BIT(4) +#define MAC_FLG_SHORT_PREAMBLE BIT(5) + +/** + * enum iwl_mac_types - Supported MAC types + * @FW_MAC_TYPE_FIRST: lowest supported MAC type + * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal) + * @FW_MAC_TYPE_LISTENER: monitor MAC type (?) 
+ * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS + * @FW_MAC_TYPE_IBSS: IBSS + * @FW_MAC_TYPE_BSS_STA: BSS (managed) station + * @FW_MAC_TYPE_P2P_DEVICE: P2P Device + * @FW_MAC_TYPE_P2P_STA: P2P client + * @FW_MAC_TYPE_GO: P2P GO + * @FW_MAC_TYPE_TEST: ? + * @FW_MAC_TYPE_MAX: highest support MAC type + */ +enum iwl_mac_types { + FW_MAC_TYPE_FIRST = 1, + FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST, + FW_MAC_TYPE_LISTENER, + FW_MAC_TYPE_PIBSS, + FW_MAC_TYPE_IBSS, + FW_MAC_TYPE_BSS_STA, + FW_MAC_TYPE_P2P_DEVICE, + FW_MAC_TYPE_P2P_STA, + FW_MAC_TYPE_GO, + FW_MAC_TYPE_TEST, + FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST +}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ + +/** + * enum iwl_tsf_id - TSF hw timer ID + * @TSF_ID_A: use TSF A + * @TSF_ID_B: use TSF B + * @TSF_ID_C: use TSF C + * @TSF_ID_D: use TSF D + * @NUM_TSF_IDS: number of TSF timers available + */ +enum iwl_tsf_id { + TSF_ID_A = 0, + TSF_ID_B = 1, + TSF_ID_C = 2, + TSF_ID_D = 3, + NUM_TSF_IDS = 4, +}; /* TSF_ID_API_E_VER_1 */ + +/** + * struct iwl_mac_data_ap - configuration data for AP MAC context + * @beacon_time: beacon transmit time in system time + * @beacon_tsf: beacon transmit time in TSF + * @bi: beacon interval in TU + * @bi_reciprocal: 2^32 / bi + * @dtim_interval: dtim transmit time in TU + * @dtim_reciprocal: 2^32 / dtim_interval + * @mcast_qid: queue ID for multicast traffic + * @beacon_template: beacon template ID + */ +struct iwl_mac_data_ap { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 dtim_interval; + __le32 dtim_reciprocal; + __le32 mcast_qid; + __le32 beacon_template; +} __packed; /* AP_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_ibss - configuration data for IBSS MAC context + * @beacon_time: beacon transmit time in system time + * @beacon_tsf: beacon transmit time in TSF + * @bi: beacon interval in TU + * @bi_reciprocal: 2^32 / bi + */ +struct iwl_mac_data_ibss { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 bi_reciprocal; +} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_sta - configuration data for station MAC context + * @is_assoc: 1 for associated state, 0 otherwise + * @dtim_time: DTIM arrival time in system time + * @dtim_tsf: DTIM arrival time in TSF + * @bi: beacon interval in TU, applicable only when associated + * @bi_reciprocal: 2^32 / bi , applicable only when associated + * @dtim_interval: DTIM interval in TU, applicable only when associated + * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated + * @listen_interval: in beacon intervals, applicable only when associated + * @assoc_id: unique ID assigned by the AP during association + */ +struct iwl_mac_data_sta { + __le32 is_assoc; + __le32 dtim_time; + __le64 dtim_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 dtim_interval; + __le32 dtim_reciprocal; + __le32 listen_interval; + __le32 assoc_id; + __le32 assoc_beacon_arrive_time; +} __packed; /* STA_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_go - configuration data for P2P GO MAC context + * @ap: iwl_mac_data_ap struct with most config data + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. 
+ * @opp_ps_enabled: indicate that opportunistic PS allowed + */ +struct iwl_mac_data_go { + struct iwl_mac_data_ap ap; + __le32 ctwin; + __le32 opp_ps_enabled; +} __packed; /* GO_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context + * @sta: iwl_mac_data_sta struct with most config data + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. + */ +struct iwl_mac_data_p2p_sta { + struct iwl_mac_data_sta sta; + __le32 ctwin; +} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_pibss - Pseudo IBSS config data + * @stats_interval: interval in TU between statistics notifications to host. + */ +struct iwl_mac_data_pibss { + __le32 stats_interval; +} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */ + +/* + * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC + * context. + * @is_disc_extended: if set to true, P2P Device discoverability is enabled on + * other channels as well. This should be to true only in case that the + * device is discoverable and there is an active GO. Note that setting this + * field when not needed, will increase the number of interrupts and have + * effect on the platform power, as this setting opens the Rx filters on + * all macs. + */ +struct iwl_mac_data_p2p_dev { + __le32 is_disc_extended; +} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */ + +/** + * enum iwl_mac_filter_flags - MAC context filter flags + * @MAC_FILTER_IN_PROMISC: accept all data frames + * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all mangement and + * control frames to the host + * @MAC_FILTER_ACCEPT_GRP: accept multicast frames + * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames + * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames + * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host + * (in station mode when associated) + * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames + * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames + * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host + */ +enum iwl_mac_filter_flags { + MAC_FILTER_IN_PROMISC = BIT(0), + MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1), + MAC_FILTER_ACCEPT_GRP = BIT(2), + MAC_FILTER_DIS_DECRYPT = BIT(3), + MAC_FILTER_DIS_GRP_DECRYPT = BIT(4), + MAC_FILTER_IN_BEACON = BIT(6), + MAC_FILTER_OUT_BCAST = BIT(8), + MAC_FILTER_IN_CRC32 = BIT(11), + MAC_FILTER_IN_PROBE_REQUEST = BIT(12), +}; + +/** + * enum iwl_mac_qos_flags - QoS flags + * @MAC_QOS_FLG_UPDATE_EDCA: ? + * @MAC_QOS_FLG_TGN: HT is enabled + * @MAC_QOS_FLG_TXOP_TYPE: ? + * + */ +enum iwl_mac_qos_flags { + MAC_QOS_FLG_UPDATE_EDCA = BIT(0), + MAC_QOS_FLG_TGN = BIT(1), + MAC_QOS_FLG_TXOP_TYPE = BIT(4), +}; + +/** + * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD + * @cw_min: Contention window, start value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x0f. + * @cw_max: Contention window, max value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x3f. + * @aifsn: Number of slots in Arbitration Interframe Space (before + * performing random backoff timing prior to Tx). Device default 1. + * @fifos_mask: FIFOs used by this MAC for this AC + * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. + * + * One instance of this config struct for each of 4 EDCA access categories + * in struct iwl_qosparam_cmd. 
+ * + * Device will automatically increase contention window by (2*CW) + 1 for each + * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW + * value, to cap the CW value. + */ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 fifos_mask; + __le16 edca_txop; +} __packed; /* AC_QOS_API_S_VER_2 */ + +/** + * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts + * ( MAC_CONTEXT_CMD = 0x28 ) + * @id_and_color: ID and color of the MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @mac_type: one of FW_MAC_TYPE_* + * @tsd_id: TSF HW timer, one of TSF_ID_* + * @node_addr: MAC address + * @bssid_addr: BSSID + * @cck_rates: basic rates available for CCK + * @ofdm_rates: basic rates available for OFDM + * @protection_flags: combination of MAC_PROT_FLG_FLAG_* + * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise + * @short_slot: 0x10 for enabling short slots, 0 otherwise + * @filter_flags: combination of MAC_FILTER_* + * @qos_flags: from MAC_QOS_FLG_* + * @ac: one iwl_mac_qos configuration for each AC + * @mac_specific: one of struct iwl_mac_data_*, according to mac_type + */ +struct iwl_mac_ctx_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */ + __le32 mac_type; + __le32 tsf_id; + u8 node_addr[6]; + __le16 reserved_for_node_addr; + u8 bssid_addr[6]; + __le16 reserved_for_bssid_addr; + __le32 cck_rates; + __le32 ofdm_rates; + __le32 protection_flags; + __le32 cck_short_preamble; + __le32 short_slot; + __le32 filter_flags; + /* MAC_QOS_PARAM_API_S_VER_1 */ + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM+1]; + /* MAC_CONTEXT_COMMON_DATA_API_S */ + union { + struct iwl_mac_data_ap ap; + struct iwl_mac_data_go go; + struct iwl_mac_data_sta sta; + struct iwl_mac_data_p2p_sta p2p_sta; + struct iwl_mac_data_p2p_dev p2p_dev; + struct iwl_mac_data_pibss pibss; + struct iwl_mac_data_ibss ibss; + }; +} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ + +static inline u32 iwl_mvm_reciprocal(u32 v) +{ + if (!v) + return 0; + return 0xFFFFFFFF / v; +} + +#endif /* __fw_api_mac_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h new file mode 100644 index 000000000000..be36b7604b7f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -0,0 +1,140 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __fw_api_power_h__ +#define __fw_api_power_h__ + +/* Power Management Commands, Responses, Notifications */ + +/** + * enum iwl_scan_flags - masks for power table command flags + * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, + * '1' Driver enables PM (use rest of parameters) + * @POWER_FLAGS_SLEEP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, + * '1' PM could sleep over DTIM till listen Interval. + * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. + * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all + * access categories are both delivery and trigger enabled. + * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and + * PBW Snoozing enabled + * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask +*/ +enum iwl_power_flags { + POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(0), + POWER_FLAGS_SLEEP_OVER_DTIM_MSK = BIT(1), + POWER_FLAGS_LPRX_ENA_MSK = BIT(2), + POWER_FLAGS_SNOOZE_ENA_MSK = BIT(3), + POWER_FLAGS_BT_SCO_ENA = BIT(4), + POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(5) +}; + +/** + * struct iwl_powertable_cmd - Power Table Command + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * + * @id_and_color: MAC contex identifier + * @action: Action on context - no action, add new, + * modify existent, remove + * @flags: Power table command flags from POWER_FLAGS_* + * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. 
+ * Minimum allowed:- 3 * DTIM + * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to + * PSM transition - legacy PM + * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to + * PSM transition - legacy PM + * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to + * PSM transition - uAPSD + * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to + * PSM transition - uAPSD + * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. + * Default: 80dbm + * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set + * @snooze_interval: TBD + * @snooze_window: TBD + * @snooze_step: TBD + * @qndp_tid: TBD + * @uapsd_ac_flags: TBD + * @uapsd_max_sp: TBD + */ +struct iwl_powertable_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + __le16 flags; + u8 reserved; + __le16 keep_alive_seconds; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 rx_data_timeout_uapsd; + __le32 tx_data_timeout_uapsd; + u8 lprx_rssi_threshold; + u8 num_skip_dtim; + __le16 snooze_interval; + __le16 snooze_window; + u8 snooze_step; + u8 qndp_tid; + u8 uapsd_ac_flags; + u8 uapsd_max_sp; +} __packed; + +#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h new file mode 100644 index 000000000000..aa3474d08231 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h @@ -0,0 +1,312 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_rs_h__ +#define __fw_api_rs_h__ + +#include "fw-api-mac.h" + +/* + * These serve as indexes into + * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, + IWL_RATE_COUNT, +}; + +#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) + +/* fw API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, +}; + +/* + * rate_n_flags bit fields + * + * The 32-bit value has different layouts in the low 8 bites depending on the + * format. There are three formats, HT, VHT and legacy (11abg, with subformats + * for CCK and OFDM). + * + * High-throughput (HT) rate format + * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) + * Very High-throughput (VHT) rate format + * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) + * Legacy OFDM rate format for bits 7:0 + * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) + * Legacy CCK rate format for bits 7:0: + * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) + */ + +/* Bit 8: (1) HT format, (0) legacy or VHT format */ +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS) + +/* Bit 9: (1) CCK, (0) OFDM. 
HT (bit 8) must be "0" for this bit to be valid */ +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS) + +/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ +#define RATE_MCS_VHT_POS 26 +#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS) + + +/* + * High-throughput (HT) rate format for bits 7:0 + * + * 2-0: MCS rate base + * 0) 6 Mbps + * 1) 12 Mbps + * 2) 18 Mbps + * 3) 24 Mbps + * 4) 36 Mbps + * 5) 48 Mbps + * 6) 54 Mbps + * 7) 60 Mbps + * 4-3: 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data + * (bits 7-6 are zero) + * + * Together the low 5 bits work out to the MCS index because we don't + * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two + * streams and 16-23 have three streams. We could also support MCS 32 + * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) + */ +#define RATE_HT_MCS_RATE_CODE_MSK 0x7 + +/* Bit 10: (1) Use Green Field preamble */ +#define RATE_HT_MCS_GF_POS 10 +#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) + +#define RATE_HT_MCS_INDEX_MSK 0x3f + +/* + * Very High-throughput (VHT) rate format for bits 7:0 + * + * 3-0: VHT MCS (0-9) + * 5-4: number of streams - 1: + * 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + */ + +/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ +#define RATE_VHT_MCS_RATE_CODE_MSK 0xf +#define RATE_VHT_MCS_NSS_POS 4 +#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) + +/* + * Legacy OFDM rate format for bits 7:0 + * + * 3-0: 0xD) 6 Mbps + * 0xF) 9 Mbps + * 0x5) 12 Mbps + * 0x7) 18 Mbps + * 0x9) 24 Mbps + * 0xB) 36 Mbps + * 0x1) 48 Mbps + * 0x3) 54 Mbps + * (bits 7-4 are 0) + * + * Legacy CCK rate format for bits 7:0: + * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): + * + * 6-0: 10) 1 Mbps + * 20) 2 Mbps + * 55) 5.5 Mbps + * 110) 11 Mbps + * (bit 7 is 0) + */ +#define RATE_LEGACY_RATE_MSK 0xff + + +/* + * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz + * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT + */ +#define RATE_MCS_CHAN_WIDTH_POS 11 +#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) + +/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) + +/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ +#define RATE_MCS_ANT_POS 14 +#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ + RATE_MCS_ANT_B_MSK) +#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ + RATE_MCS_ANT_C_MSK) +#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK +#define RATE_MCS_ANT_NUM 3 + +/* Bit 17-18: (0) SS, (1) SS*2 */ +#define RATE_MCS_STBC_POS 17 +#define RATE_MCS_STBC_MSK (1 << RATE_MCS_STBC_POS) + +/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ +#define RATE_MCS_BF_POS 19 +#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) + +/* Bit 20: (0) ZLF is off, (1) ZLF is on */ +#define RATE_MCS_ZLF_POS 20 +#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS) + +/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz 
*/ +#define RATE_MCS_DUP_POS 24 +#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS) + +/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ +#define RATE_MCS_LDPC_POS 27 +#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) + + +/* Link Quality definitions */ + +/* # entries in rate scale table to support Tx retries */ +#define LQ_MAX_RETRY_NUM 16 + +/* Link quality command flags, only this one is available */ +#define LQ_FLAG_SET_STA_TLC_RTS_MSK BIT(0) + +/** + * struct iwl_lq_cmd - link quality command + * @sta_id: station to update + * @control: not used + * @flags: combination of LQ_FLAG_* + * @mimo_delim: the first SISO index in rs_table, which separates MIMO + * and SISO rates + * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). + * Should be ANT_[ABC] + * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] + * @initial_rate_index: first index from rs_table per AC category + * @agg_time_limit: aggregation max time threshold in usec/100, meaning + * value of 100 is one usec. Range is 100 to 8000 + * @agg_disable_start_th: try-count threshold for starting aggregation. + * If a frame has higher try-count, it should not be selected for + * starting an aggregation sequence. + * @agg_frame_cnt_limit: max frame count in an aggregation. + * 0: no limit + * 1: no aggregation (one frame per aggregation) + * 2 - 0x3f: maximal number of frames (up to 3f == 63) + * @rs_table: array of rates for each TX try, each is rate_n_flags, + * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP + * @bf_params: beam forming params, currently not used + */ +struct iwl_lq_cmd { + u8 sta_id; + u8 reserved1; + u16 control; + /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ + u8 flags; + u8 mimo_delim; + u8 single_stream_ant_msk; + u8 dual_stream_ant_msk; + u8 initial_rate_index[AC_NUM]; + /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ + __le16 agg_time_limit; + u8 agg_disable_start_th; + u8 agg_frame_cnt_limit; + __le32 reserved2; + __le32 rs_table[LQ_MAX_RETRY_NUM]; + __le32 bf_params; +}; /* LINK_QUALITY_CMD_API_S_VER_1 */ +#endif /* __fw_api_rs_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h new file mode 100644 index 000000000000..670ac8f95e26 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -0,0 +1,561 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __fw_api_scan_h__ +#define __fw_api_scan_h__ + +#include "fw-api.h" + +/* Scan Commands, Responses, Notifications */ + +/* Masks for iwl_scan_channel.type flags */ +#define SCAN_CHANNEL_TYPE_PASSIVE 0 +#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0) +#define SCAN_CHANNEL_NARROW_BAND BIT(22) + +/* Max number of IEs for direct SSID scans in a command */ +#define PROBE_OPTION_MAX 20 + +/** + * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table + * @channel: band is selected by iwl_scan_cmd "flags" field + * @tx_gain: gain for analog radio + * @dsp_atten: gain for DSP + * @active_dwell: dwell time for active scan in TU, typically 5-50 + * @passive_dwell: dwell time for passive scan in TU, typically 20-500 + * @type: type is broken down to these bits: + * bit 0: 0 = passive, 1 = active + * bits 1-20: SSID direct bit map. If any of these bits is set then + * the corresponding SSID IE is transmitted in probe request + * (bit i adds IE in position i to the probe request) + * bit 22: channel width, 0 = regular, 1 = TGj narrow channel + * + * @iteration_count: + * @iteration_interval: + * This struct is used once for each channel in the scan list. + * Each channel can independently select: + * 1) SSID for directed active scans + * 2) Txpower setting (for rate specified within Tx command) + * 3) How long to stay on-channel (behavior may be modified by quiet_time, + * quiet_plcp_th, good_CRC_th) + * + * To avoid uCode errors, make sure the following are true (see comments + * under struct iwl_scan_cmd about max_out_time and quiet_time): + * 1) If using passive_dwell (i.e. 
passive_dwell != 0): + * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) + * 2) quiet_time <= active_dwell + * 3) If restricting off-channel time (i.e. max_out_time !=0): + * passive_dwell < max_out_time + * active_dwell < max_out_time + */ +struct iwl_scan_channel { + __le32 type; + __le16 channel; + __le16 iteration_count; + __le32 iteration_interval; + __le16 active_dwell; + __le16 passive_dwell; +} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */ + +/** + * struct iwl_ssid_ie - directed scan network information element + * + * Up to 20 of these may appear in REPLY_SCAN_CMD, + * selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 entries. + * SSID IEs get transmitted in reverse order of entry. + */ +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ + +/** + * iwl_scan_flags - masks for scan command flags + *@SCAN_FLAGS_PERIODIC_SCAN: + *@SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX: + *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND: + *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND: + *@SCAN_FLAGS_FRAGMENTED_SCAN: + */ +enum iwl_scan_flags { + SCAN_FLAGS_PERIODIC_SCAN = BIT(0), + SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX = BIT(1), + SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2), + SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3), + SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4), +}; + +/** + * enum iwl_scan_type - Scan types for scan command + * @SCAN_TYPE_FORCED: + * @SCAN_TYPE_BACKGROUND: + * @SCAN_TYPE_OS: + * @SCAN_TYPE_ROAMING: + * @SCAN_TYPE_ACTION: + * @SCAN_TYPE_DISCOVERY: + * @SCAN_TYPE_DISCOVERY_FORCED: + */ +enum iwl_scan_type { + SCAN_TYPE_FORCED = 0, + SCAN_TYPE_BACKGROUND = 1, + SCAN_TYPE_OS = 2, + SCAN_TYPE_ROAMING = 3, + SCAN_TYPE_ACTION = 4, + SCAN_TYPE_DISCOVERY = 5, + SCAN_TYPE_DISCOVERY_FORCED = 6, +}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */ + +/* Maximal number of channels to scan */ +#define MAX_NUM_SCAN_CHANNELS 0x24 + +/** + * struct iwl_scan_cmd - scan request command + * ( SCAN_REQUEST_CMD = 0x80 ) + * @len: command length in bytes + * @scan_flags: scan flags from SCAN_FLAGS_* + * @channel_count: num of channels in channel list (1 - MAX_NUM_SCAN_CHANNELS) + * @quiet_time: in msecs, dwell this time for active scan on quiet channels + * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than + * this number of packets were received (typically 1) + * @passive2active: is auto switching from passive to active allowed (0 or 1) + * @rxchain_sel_flags: RXON_RX_CHAIN_* + * @max_out_time: in usecs, max out of serving channel time + * @suspend_time: how long to pause scan when returning to service channel: + * bits 0-19: beacon interal in usecs (suspend before executing) + * bits 20-23: reserved + * bits 24-31: number of beacons (suspend between channels) + * @rxon_flags: RXON_FLG_* + * @filter_flags: RXON_FILTER_* + * @tx_cmd: for active scans (zero for passive), w/o payload, + * no RS so specify TX rate + * @direct_scan: direct scan SSIDs + * @type: one of SCAN_TYPE_* + * @repeats: how many time to repeat the scan + */ +struct iwl_scan_cmd { + __le16 len; + u8 scan_flags; + u8 channel_count; + __le16 quiet_time; + __le16 quiet_plcp_th; + __le16 passive2active; + __le16 rxchain_sel_flags; + __le32 max_out_time; + __le32 suspend_time; + /* RX_ON_FLAGS_API_S_VER_1 */ + __le32 rxon_flags; + __le32 filter_flags; + struct iwl_tx_cmd tx_cmd; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + __le32 type; + __le32 repeats; + + /* + * Probe request frame, followed by 
channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. + */ + u8 data[0]; +} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */ + +/* Response to scan request contains only status with one of these values */ +#define SCAN_RESPONSE_OK 0x1 +#define SCAN_RESPONSE_ERROR 0x2 + +/* + * SCAN_ABORT_CMD = 0x81 + * When scan abort is requested, the command has no fields except the common + * header. The response contains only a status with one of these values. + */ +#define SCAN_ABORT_POSSIBLE 0x1 +#define SCAN_ABORT_IGNORED 0x2 /* no pending scans */ + +/* TODO: complete documentation */ +#define SCAN_OWNER_STATUS 0x1 +#define MEASURE_OWNER_STATUS 0x2 + +/** + * struct iwl_scan_start_notif - notifies start of scan in the device + * ( SCAN_START_NOTIFICATION = 0x82 ) + * @tsf_low: TSF timer (lower half) in usecs + * @tsf_high: TSF timer (higher half) in usecs + * @beacon_timer: structured as follows: + * bits 0:19 - beacon interval in usecs + * bits 20:23 - reserved (0) + * bits 24:31 - number of beacons + * @channel: which channel is scanned + * @band: 0 for 5.2 GHz, 1 for 2.4 GHz + * @status: one of *_OWNER_STATUS + */ +struct iwl_scan_start_notif { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +} __packed; /* SCAN_START_NTF_API_S_VER_1 */ + +/* scan results probe_status first bit indicates success */ +#define SCAN_PROBE_STATUS_OK 0 +#define SCAN_PROBE_STATUS_TX_FAILED BIT(0) +/* error statuses combined with TX_FAILED */ +#define SCAN_PROBE_STATUS_FAIL_TTL BIT(1) +#define SCAN_PROBE_STATUS_FAIL_BT BIT(2) + +/* How many statistics are gathered for each channel */ +#define SCAN_RESULTS_STATISTICS 1 + +/** + * enum iwl_scan_complete_status - status codes for scan complete notifications + * @SCAN_COMP_STATUS_OK: scan completed successfully + * @SCAN_COMP_STATUS_ABORT: scan was aborted by user + * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed + * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready + * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed + * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed + * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command + * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort + * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax + * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful + * (not an error!) 
+ * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repeatition the driver + * asked for + * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events +*/ +enum iwl_scan_complete_status { + SCAN_COMP_STATUS_OK = 0x1, + SCAN_COMP_STATUS_ABORT = 0x2, + SCAN_COMP_STATUS_ERR_SLEEP = 0x3, + SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4, + SCAN_COMP_STATUS_ERR_PROBE = 0x5, + SCAN_COMP_STATUS_ERR_WAKEUP = 0x6, + SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7, + SCAN_COMP_STATUS_ERR_INTERNAL = 0x8, + SCAN_COMP_STATUS_ERR_COEX = 0x9, + SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA, + SCAN_COMP_STATUS_ITERATION_END = 0x0B, + SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C, +}; + +/** + * struct iwl_scan_results_notif - scan results for one channel + * ( SCAN_RESULTS_NOTIFICATION = 0x83 ) + * @channel: which channel the results are from + * @band: 0 for 5.2 GHz, 1 for 2.4 GHz + * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request + * @num_probe_not_sent: # of request that weren't sent due to not enough time + * @duration: duration spent in channel, in usecs + * @statistics: statistics gathered for this channel + */ +struct iwl_scan_results_notif { + u8 channel; + u8 band; + u8 probe_status; + u8 num_probe_not_sent; + __le32 duration; + __le32 statistics[SCAN_RESULTS_STATISTICS]; +} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */ + +/** + * struct iwl_scan_complete_notif - notifies end of scanning (all channels) + * ( SCAN_COMPLETE_NOTIFICATION = 0x84 ) + * @scanned_channels: number of channels scanned (and number of valid results) + * @status: one of SCAN_COMP_STATUS_* + * @bt_status: BT on/off status + * @last_channel: last channel that was scanned + * @tsf_low: TSF timer (lower half) in usecs + * @tsf_high: TSF timer (higher half) in usecs + * @results: all scan results, only "scanned_channels" of them are valid + */ +struct iwl_scan_complete_notif { + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; + struct iwl_scan_results_notif results[MAX_NUM_SCAN_CHANNELS]; +} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */ + +/* scan offload */ +#define IWL_MAX_SCAN_CHANNELS 40 +#define IWL_SCAN_MAX_BLACKLIST_LEN 64 +#define IWL_SCAN_MAX_PROFILES 11 +#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 + +/* Default watchdog (in MS) for scheduled scan iteration */ +#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) + +#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define CAN_ABORT_STATUS 1 + +#define IWL_FULL_SCAN_MULTIPLIER 5 +#define IWL_FAST_SCHED_SCAN_ITERATIONS 3 + +/** + * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6 + * @scan_flags: see enum iwl_scan_flags + * @channel_count: channels in channel list + * @quiet_time: dwell time, in milisiconds, on quiet channel + * @quiet_plcp_th: quiet channel num of packets threshold + * @good_CRC_th: passive to active promotion threshold + * @rx_chain: RXON rx chain. + * @max_out_time: max uSec to be out of assoceated channel + * @suspend_time: pause scan this long when returning to service channel + * @flags: RXON flags + * @filter_flags: RXONfilter + * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. + * @direct_scan: list of SSIDs for directed active scan + * @scan_type: see enum iwl_scan_type. + * @rep_count: repetition count for each scheduled scan iteration. 
+ */ +struct iwl_scan_offload_cmd { + __le16 len; + u8 scan_flags; + u8 channel_count; + __le16 quiet_time; + __le16 quiet_plcp_th; + __le16 good_CRC_th; + __le16 rx_chain; + __le32 max_out_time; + __le32 suspend_time; + /* RX_ON_FLAGS_API_S_VER_1 */ + __le32 flags; + __le32 filter_flags; + struct iwl_tx_cmd tx_cmd[2]; + /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + __le32 scan_type; + __le32 rep_count; +} __packed; + +enum iwl_scan_offload_channel_flags { + IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0), + IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22), + IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24), + IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25), +}; + +/** + * iwl_scan_channel_cfg - SCAN_CHANNEL_CFG_S + * @type: bitmap - see enum iwl_scan_offload_channel_flags. + * 0: passive (0) or active (1) scan. + * 1-20: directed scan to i'th ssid. + * 22: channel width configuation - 1 for narrow. + * 24: full scan. + * 25: partial scan. + * @channel_number: channel number 1-13 etc. + * @iter_count: repetition count for the channel. + * @iter_interval: interval between two innteration on one channel. + * @dwell_time: entry 0 - active scan, entry 1 - passive scan. + */ +struct iwl_scan_channel_cfg { + __le32 type[IWL_MAX_SCAN_CHANNELS]; + __le16 channel_number[IWL_MAX_SCAN_CHANNELS]; + __le16 iter_count[IWL_MAX_SCAN_CHANNELS]; + __le32 iter_interval[IWL_MAX_SCAN_CHANNELS]; + u8 dwell_time[IWL_MAX_SCAN_CHANNELS][2]; +} __packed; + +/** + * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S + * @scan_cmd: scan command fixed part + * @channel_cfg: scan channel configuration + * @data: probe request frames (one per band) + */ +struct iwl_scan_offload_cfg { + struct iwl_scan_offload_cmd scan_cmd; + struct iwl_scan_channel_cfg channel_cfg; + u8 data[0]; +} __packed; + +/** + * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S + * @ssid: MAC address to filter out + * @reported_rssi: AP rssi reported to the host + */ +struct iwl_scan_offload_blacklist { + u8 ssid[ETH_ALEN]; + u8 reported_rssi; + u8 reserved; +} __packed; + +enum iwl_scan_offload_network_type { + IWL_NETWORK_TYPE_BSS = 1, + IWL_NETWORK_TYPE_IBSS = 2, + IWL_NETWORK_TYPE_ANY = 3, +}; + +enum iwl_scan_offload_band_selection { + IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4, + IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8, + IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, +}; + +/** + * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S + * @ssid_index: index to ssid list in fixed part + * @unicast_cipher: encryption olgorithm to match - bitmap + * @aut_alg: authentication olgorithm to match - bitmap + * @network_type: enum iwl_scan_offload_network_type + * @band_selection: enum iwl_scan_offload_band_selection + */ +struct iwl_scan_offload_profile { + u8 ssid_index; + u8 unicast_cipher; + u8 auth_alg; + u8 network_type; + u8 band_selection; + u8 reserved[3]; +} __packed; + +/** + * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1 + * @blaclist: AP list to filter off from scan results + * @profiles: profiles to search for match + * @blacklist_len: length of blacklist + * @num_profiles: num of profiles in the list + */ +struct iwl_scan_offload_profile_cfg { + struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN]; + struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; + u8 blacklist_len; + u8 num_profiles; + u8 reserved[2]; +} __packed; + +/** + * iwl_scan_offload_schedule - schedule of scan offload + * @delay: delay between iterations, in seconds. 
+ * @iterations: num of scan iterations + * @full_scan_mul: number of partial scans before each full scan + */ +struct iwl_scan_offload_schedule { + u16 delay; + u8 iterations; + u8 full_scan_mul; +} __packed; + +/* + * iwl_scan_offload_flags + * + * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match + * ssid list. + * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan. + * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan + * on A band. + */ +enum iwl_scan_offload_flags { + IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID = BIT(0), + IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2), + IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3), +}; + +/** + * iwl_scan_offload_req - scan offload request command + * @flags: bitmap - enum iwl_scan_offload_flags. + * @watchdog: maximum scan duration in TU. + * @delay: delay in seconds before first iteration. + * @schedule_line: scan offload schedule, for fast and regular scan. + */ +struct iwl_scan_offload_req { + __le16 flags; + __le16 watchdog; + __le16 delay; + __le16 reserved; + struct iwl_scan_offload_schedule schedule_line[2]; +} __packed; + +enum iwl_scan_offload_compleate_status { + IWL_SCAN_OFFLOAD_COMPLETED = 1, + IWL_SCAN_OFFLOAD_ABORTED = 2, +}; + +/** + * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1 + * @last_schedule_line: last schedule line executed (fast or regular) + * @last_schedule_iteration: last scan iteration executed before scan abort + * @status: enum iwl_scan_offload_compleate_status + */ +struct iwl_scan_offload_complete { + u8 last_schedule_line; + u8 last_schedule_iteration; + u8 status; + u8 reserved; +} __packed; + +#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h new file mode 100644 index 000000000000..0acb53dda22d --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h @@ -0,0 +1,380 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_sta_h__ +#define __fw_api_sta_h__ + +/** + * enum iwl_sta_flags - flags for the ADD_STA host command + * @STA_FLG_REDUCED_TX_PWR_CTRL: + * @STA_FLG_REDUCED_TX_PWR_DATA: + * @STA_FLG_FLG_ANT_MSK: Antenna selection + * @STA_FLG_PS: set if STA is in Power Save + * @STA_FLG_INVALID: set if STA is invalid + * @STA_FLG_DLP_EN: Direct Link Protocol is enabled + * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs + * @STA_FLG_DRAIN_FLOW: drain flow + * @STA_FLG_PAN: STA is for PAN interface + * @STA_FLG_CLASS_AUTH: + * @STA_FLG_CLASS_ASSOC: + * @STA_FLG_CLASS_MIMO_PROT: + * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU + * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation + * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is + * initialised by driver and can be updated by fw upon reception of + * action frames that can change the channel width. When cleared the fw + * will send all the frames in 20MHz even when FAT channel is requested. + * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the + * driver and can be updated by fw upon reception of action frames. 
+ * @STA_FLG_MFP_EN: Management Frame Protection + */ +enum iwl_sta_flags { + STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3), + STA_FLG_REDUCED_TX_PWR_DATA = BIT(6), + + STA_FLG_FLG_ANT_A = (1 << 4), + STA_FLG_FLG_ANT_B = (2 << 4), + STA_FLG_FLG_ANT_MSK = (STA_FLG_FLG_ANT_A | + STA_FLG_FLG_ANT_B), + + STA_FLG_PS = BIT(8), + STA_FLG_INVALID = BIT(9), + STA_FLG_DLP_EN = BIT(10), + STA_FLG_SET_ALL_KEYS = BIT(11), + STA_FLG_DRAIN_FLOW = BIT(12), + STA_FLG_PAN = BIT(13), + STA_FLG_CLASS_AUTH = BIT(14), + STA_FLG_CLASS_ASSOC = BIT(15), + STA_FLG_RTS_MIMO_PROT = BIT(17), + + STA_FLG_MAX_AGG_SIZE_SHIFT = 19, + STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), + + STA_FLG_AGG_MPDU_DENS_SHIFT = 23, + STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), + + STA_FLG_FAT_EN_20MHZ = (0 << 26), + STA_FLG_FAT_EN_40MHZ = (1 << 26), + STA_FLG_FAT_EN_80MHZ = (2 << 26), + STA_FLG_FAT_EN_160MHZ = (3 << 26), + STA_FLG_FAT_EN_MSK = (3 << 26), + + STA_FLG_MIMO_EN_SISO = (0 << 28), + STA_FLG_MIMO_EN_MIMO2 = (1 << 28), + STA_FLG_MIMO_EN_MIMO3 = (2 << 28), + STA_FLG_MIMO_EN_MSK = (3 << 28), +}; + +/** + * enum iwl_sta_key_flag - key flags for the ADD_STA host command + * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm + * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from + * station info array (1 - n 1X mode) + * @STA_KEY_FLG_KEYID_MSK: the index of the key + * @STA_KEY_NOT_VALID: key is invalid + * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key + * @STA_KEY_MULTICAST: set for multical key + * @STA_KEY_MFP: key is used for Management Frame Protection + */ +enum iwl_sta_key_flag { + STA_KEY_FLG_NO_ENC = (0 << 0), + STA_KEY_FLG_WEP = (1 << 0), + STA_KEY_FLG_CCM = (2 << 0), + STA_KEY_FLG_TKIP = (3 << 0), + STA_KEY_FLG_CMAC = (6 << 0), + STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), + STA_KEY_FLG_EN_MSK = (7 << 0), + + STA_KEY_FLG_WEP_KEY_MAP = BIT(3), + STA_KEY_FLG_KEYID_POS = 8, + STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), + STA_KEY_NOT_VALID = BIT(11), + STA_KEY_FLG_WEP_13BYTES = BIT(12), + STA_KEY_MULTICAST = BIT(14), + STA_KEY_MFP = BIT(15), +}; + +/** + * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed + * @STA_MODIFY_KEY: this command modifies %key + * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx + * @STA_MODIFY_TX_RATE: unused + * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid + * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid + * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count + * @STA_MODIFY_PROT_TH: + * @STA_MODIFY_QUEUES: modify the queues used by this station + */ +enum iwl_sta_modify_flag { + STA_MODIFY_KEY = BIT(0), + STA_MODIFY_TID_DISABLE_TX = BIT(1), + STA_MODIFY_TX_RATE 
= BIT(2), + STA_MODIFY_ADD_BA_TID = BIT(3), + STA_MODIFY_REMOVE_BA_TID = BIT(4), + STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5), + STA_MODIFY_PROT_TH = BIT(6), + STA_MODIFY_QUEUES = BIT(7), +}; + +#define STA_MODE_MODIFY 1 + +/** + * enum iwl_sta_sleep_flag - type of sleep of the station + * @STA_SLEEP_STATE_AWAKE: + * @STA_SLEEP_STATE_PS_POLL: + * @STA_SLEEP_STATE_UAPSD: + */ +enum iwl_sta_sleep_flag { + STA_SLEEP_STATE_AWAKE = 0, + STA_SLEEP_STATE_PS_POLL = BIT(0), + STA_SLEEP_STATE_UAPSD = BIT(1), +}; + +/* STA ID and color bits definitions */ +#define STA_ID_SEED (0x0f) +#define STA_ID_POS (0) +#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS) + +#define STA_COLOR_SEED (0x7) +#define STA_COLOR_POS (4) +#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS) + +#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \ + (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS) +#define STA_ID_N_COLOR_GET_ID(id_n_color) \ + (((id_n_color) & STA_ID_MSK) >> STA_ID_POS) + +#define STA_KEY_MAX_NUM (16) +#define STA_KEY_IDX_INVALID (0xff) +#define STA_KEY_MAX_DATA_KEY_NUM (4) +#define IWL_MAX_GLOBAL_KEYS (4) +#define STA_KEY_LEN_WEP40 (5) +#define STA_KEY_LEN_WEP104 (13) + +/** + * struct iwl_mvm_keyinfo - key information + * @key_flags: type %iwl_sta_key_flag + * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection + * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx + * @key_offset: key offset in the fw's key table + * @key: 16-byte unicast decryption key + * @tx_secur_seq_cnt: initial RSC / PN needed for replay check + * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only + * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only + */ +struct iwl_mvm_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; + u8 reserved1; + __le16 tkip_rx_ttak[5]; + u8 key_offset; + u8 reserved2; + u8 key[16]; + __le64 tx_secur_seq_cnt; + __le64 hw_tkip_mic_rx_key; + __le64 hw_tkip_mic_tx_key; +} __packed; + +/** + * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table + * ( REPLY_ADD_STA = 0x18 ) + * @add_modify: 1: modify existing, 0: add new station + * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent + * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key + * sent + * @mac_id_n_color: the Mac context this station belongs to + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave + * alone. 1 - modify, 0 - don't change. + * @key: look at %iwl_mvm_keyinfo + * @station_flags: look at %iwl_sta_flags + * @station_flags_msk: what of %station_flags have changed + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set + * add_immediate_ba_ssn. + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) + * Set %STA_MODIFY_REMOVE_BA_TID to use this field + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with + * add_immediate_ba_tid. + * @sleep_tx_count: number of packets to transmit to station even though it is + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode + * keeps track of STA sleep state. + * @sleep_state_flags: Look at %iwl_sta_sleep_flag. + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP + * mac-addr. 
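/*
 * Editor's aside (standalone sketch, not driver code): the station
 * id-and-colour word used above packs 4 bits of station ID and 3 bits of
 * colour.  The masks and the two GET macros are copied from the header; the
 * PACK helper is not in the header and is added here only for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define STA_ID_POS	(0)
#define STA_ID_MSK	(0x0f << STA_ID_POS)
#define STA_COLOR_POS	(4)
#define STA_COLOR_MSK	(0x7 << STA_COLOR_POS)

#define STA_ID_N_COLOR_GET_COLOR(v)	(((v) & STA_COLOR_MSK) >> STA_COLOR_POS)
#define STA_ID_N_COLOR_GET_ID(v)	(((v) & STA_ID_MSK) >> STA_ID_POS)
/* not in the header, demo helper only */
#define STA_ID_N_COLOR_PACK(id, color) \
	((((id) << STA_ID_POS) & STA_ID_MSK) | \
	 (((color) << STA_COLOR_POS) & STA_COLOR_MSK))

int main(void)
{
	uint32_t v = STA_ID_N_COLOR_PACK(9, 3);

	printf("id=%u color=%u raw=0x%02x\n",
	       (unsigned)STA_ID_N_COLOR_GET_ID(v),
	       (unsigned)STA_ID_N_COLOR_GET_COLOR(v),
	       (unsigned)v);
	return 0;
}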
+ * @beamform_flags: beam forming controls + * @tfd_queue_msk: tfd queues used by this station + * + * The device contains an internal table of per-station information, with info + * on security keys, aggregation parameters, and Tx rates for initial Tx + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). + * + * ADD_STA sets up the table entry for one station, either creating a new + * entry, or modifying a pre-existing one. + */ +struct iwl_mvm_add_sta_cmd { + u8 add_modify; + u8 unicast_tx_key_id; + u8 multicast_tx_key_id; + u8 reserved1; + __le32 mac_id_n_color; + u8 addr[ETH_ALEN]; + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + struct iwl_mvm_keyinfo key; + __le32 station_flags; + __le32 station_flags_msk; + __le16 tid_disable_tx; + __le16 reserved4; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + __le16 sleep_state_flags; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; +} __packed; /* ADD_STA_CMD_API_S_VER_5 */ + +/** + * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command + * @ADD_STA_SUCCESS: operation was executed successfully + * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table + * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session + * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that + * doesn't exist. + */ +enum iwl_mvm_add_sta_rsp_status { + ADD_STA_SUCCESS = 0x1, + ADD_STA_STATIONS_OVERLOAD = 0x2, + ADD_STA_IMMEDIATE_BA_FAILURE = 0x4, + ADD_STA_MODIFY_NON_EXISTING_STA = 0x8, +}; + +/** + * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table + * ( REMOVE_STA = 0x19 ) + * @sta_id: the station id of the station to be removed + */ +struct iwl_mvm_rm_sta_cmd { + u8 sta_id; + u8 reserved[3]; +} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ + +/** + * struct iwl_mvm_mgmt_mcast_key_cmd + * ( MGMT_MCAST_KEY = 0x1f ) + * @ctrl_flags: %iwl_sta_key_flag + * @IGTK: + * @K1: IGTK master key + * @K2: IGTK sub key + * @sta_id: station ID that support IGTK + * @key_id: + * @receive_seq_cnt: initial RSC/PN needed for replay check + */ +struct iwl_mvm_mgmt_mcast_key_cmd { + __le32 ctrl_flags; + u8 IGTK[16]; + u8 K1[16]; + u8 K2[16]; + __le32 key_id; + __le32 sta_id; + __le64 receive_seq_cnt; +} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ + +struct iwl_mvm_wep_key { + u8 key_index; + u8 key_offset; + __le16 reserved1; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +} __packed; + +struct iwl_mvm_wep_key_cmd { + __le32 mac_id_n_color; + u8 num_keys; + u8 decryption_type; + u8 flags; + u8 reserved; + struct iwl_mvm_wep_key wep_key[0]; +} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ + + +#endif /* __fw_api_sta_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h new file mode 100644 index 000000000000..2677914bf0a6 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h @@ -0,0 +1,580 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. 
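/*
 * Editor's aside (illustrative sketch, not the driver's ADD_STA path): how
 * the station_flags / station_flags_msk pair described above could be filled
 * for an HT40, 2x2 peer with 64K A-MPDUs and 4us MPDU density.  The flag
 * values are copied from enum iwl_sta_flags; the mask names every field
 * being set so the firmware knows which bits of station_flags to update.
 */
#include <stdint.h>
#include <stdio.h>

#define STA_FLG_MAX_AGG_SIZE_SHIFT	19
#define STA_FLG_MAX_AGG_SIZE_64K	(3u << STA_FLG_MAX_AGG_SIZE_SHIFT)
#define STA_FLG_MAX_AGG_SIZE_MSK	(7u << STA_FLG_MAX_AGG_SIZE_SHIFT)
#define STA_FLG_AGG_MPDU_DENS_SHIFT	23
#define STA_FLG_AGG_MPDU_DENS_4US	(5u << STA_FLG_AGG_MPDU_DENS_SHIFT)
#define STA_FLG_AGG_MPDU_DENS_MSK	(7u << STA_FLG_AGG_MPDU_DENS_SHIFT)
#define STA_FLG_FAT_EN_40MHZ		(1u << 26)
#define STA_FLG_FAT_EN_MSK		(3u << 26)
#define STA_FLG_MIMO_EN_MIMO2		(1u << 28)
#define STA_FLG_MIMO_EN_MSK		(3u << 28)

int main(void)
{
	uint32_t flags = STA_FLG_MAX_AGG_SIZE_64K | STA_FLG_AGG_MPDU_DENS_4US |
			 STA_FLG_FAT_EN_40MHZ | STA_FLG_MIMO_EN_MIMO2;
	uint32_t msk   = STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK |
			 STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK;

	printf("station_flags=0x%08x station_flags_msk=0x%08x\n",
	       (unsigned)flags, (unsigned)msk);
	return 0;
}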
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_tx_h__ +#define __fw_api_tx_h__ + +/** + * enum iwl_tx_flags - bitmasks for tx_flags in TX command + * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame + * @TX_CMD_FLG_ACK: expect ACK from receiving station + * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. + * Otherwise, use rate_n_flags from the TX command + * @TX_CMD_FLG_BA: this frame is a block ack + * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected + * Must set TX_CMD_FLG_ACK with this flag. 
+ * @TX_CMD_FLG_TXOP_PROT: protect frame with full TXOP protection + * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence + * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence + * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) + * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame + * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. + * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command + * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU + * @TX_CMD_FLG_NEXT_FRAME: this frame includes information of the next frame + * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame + * Should be set for beacons and probe responses + * @TX_CMD_FLG_CALIB: activate PA TX power calibrations + * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count + * @TX_CMD_FLG_AGG_START: allow this frame to start aggregation + * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header. + * Should be set for 26/30 length MAC headers + * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW + * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration + * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation + * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that + * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id + * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped + * @TX_CMD_FLG_EXEC_PAPD: execute PAPD + * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power + * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk + */ +enum iwl_tx_flags { + TX_CMD_FLG_PROT_REQUIRE = BIT(0), + TX_CMD_FLG_ACK = BIT(3), + TX_CMD_FLG_STA_RATE = BIT(4), + TX_CMD_FLG_BA = BIT(5), + TX_CMD_FLG_BAR = BIT(6), + TX_CMD_FLG_TXOP_PROT = BIT(7), + TX_CMD_FLG_VHT_NDPA = BIT(8), + TX_CMD_FLG_HT_NDPA = BIT(9), + TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), + TX_CMD_FLG_BT_DIS = BIT(12), + TX_CMD_FLG_SEQ_CTL = BIT(13), + TX_CMD_FLG_MORE_FRAG = BIT(14), + TX_CMD_FLG_NEXT_FRAME = BIT(15), + TX_CMD_FLG_TSF = BIT(16), + TX_CMD_FLG_CALIB = BIT(17), + TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18), + TX_CMD_FLG_AGG_START = BIT(19), + TX_CMD_FLG_MH_PAD = BIT(20), + TX_CMD_FLG_RESP_TO_DRV = BIT(21), + TX_CMD_FLG_CCMP_AGG = BIT(22), + TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), + TX_CMD_FLG_CTS_ONLY = BIT(24), + TX_CMD_FLG_DUR = BIT(25), + TX_CMD_FLG_FW_DROP = BIT(26), + TX_CMD_FLG_EXEC_PAPD = BIT(27), + TX_CMD_FLG_PAPD_TYPE = BIT(28), + TX_CMD_FLG_HCCA_CHUNK = BIT(31) +}; /* TX_FLAGS_BITS_API_S_VER_1 */ + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_WEP_KEY_IDX_POS 6 +#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 +#define TX_CMD_SEC_KEY128 0x08 + +/* TODO: how does these values are OK with only 16 bit variable??? 
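/*
 * Editor's aside (standalone sketch, not the driver's Tx path): composing
 * tx_flags and sec_ctl as documented above for an ordinary ACKed data frame
 * that uses the rate-scaling table and CCMP.  The constants are copied from
 * the definitions above; the key material itself would go in the key[] field
 * of the Tx command and is not shown.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1U << (n))
#define TX_CMD_FLG_ACK		BIT(3)
#define TX_CMD_FLG_STA_RATE	BIT(4)
#define TX_CMD_SEC_CCM		0x02

int main(void)
{
	uint32_t tx_flags = TX_CMD_FLG_ACK | TX_CMD_FLG_STA_RATE;
	uint8_t sec_ctl = TX_CMD_SEC_CCM;

	printf("tx_flags=0x%08x sec_ctl=0x%02x\n",
	       (unsigned)tx_flags, (unsigned)sec_ctl);
	return 0;
}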
*/
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
+#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
+#define TX_CMD_NEXT_FRAME_BA_MSK (0x20)
+#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
+#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
+#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
+#define TX_CMD_NEXT_FRAME_STA_ID_POS (8)
+#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
+#define TX_CMD_NEXT_FRAME_RATE_POS (16)
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms */
+#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWL_DEFAULT_TX_RETRY 15
+#define IWL_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWL_RTS_DFAULT_RETRY_LIMIT 60
+#define IWL_BAR_DFAULT_RETRY_LIMIT 60
+#define IWL_LOW_RETRY_LIMIT 7
+
+/* TODO: complete documentation for try_cnt and btkill_cnt */
+/**
+ * struct iwl_tx_cmd - TX command struct to FW
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @next_frame_len: same as len, but for next frame (0 if not applicable)
+ * Used for fragmentation and bursting, but not in 11n aggregation.
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @sec_ctl: security control, TX_CMD_SEC_*
+ * @initial_rate_index: index into the rate table for initial TX attempt.
+ * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
+ * @key: security key
+ * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
+ * @life_time: frame life time (usecs??)
+ * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
+ * btkill_cnt + reserved), first 32 bits. "0" disables usage.
+ * @dram_msb_ptr: upper bits of the scratch physical address
+ * @rts_retry_limit: max attempts for RTS
+ * @data_retry_limit: max attempts to send the data packet
+ * @tid_tspec: TID/tspec
+ * @pm_frame_timeout: PM TX frame timeout
+ * @driver_txop: duration of EDCA TXOP, in 32-usec units. Set this if not
+ * specified by HCCA protocol
+ *
+ * The byte count (both len and next_frame_len) includes MAC header
+ * (24/26/30/32 bytes)
+ * + 2 bytes pad if 26/30 header size
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * It does not include post-MAC padding, i.e.,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range of len: 14-2342 bytes.
+ *
+ * After the struct fields the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */ +struct iwl_tx_cmd { + __le16 len; + __le16 next_frame_len; + __le32 tx_flags; + /* DRAM_SCRATCH_API_U_VER_1 */ + u8 try_cnt; + u8 btkill_cnt; + __le16 reserved; + __le32 rate_n_flags; + u8 sta_id; + u8 sec_ctl; + u8 initial_rate_index; + u8 reserved2; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved3; + __le32 life_time; + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 tid_tspec; + __le16 pm_frame_timeout; + __le16 driver_txop; + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_3 */ + +/* + * TX response related data + */ + +/* + * enum iwl_tx_status - status that is returned by the fw after attempts to Tx + * @TX_STATUS_SUCCESS: + * @TX_STATUS_DIRECT_DONE: + * @TX_STATUS_POSTPONE_DELAY: + * @TX_STATUS_POSTPONE_FEW_BYTES: + * @TX_STATUS_POSTPONE_BT_PRIO: + * @TX_STATUS_POSTPONE_QUIET_PERIOD: + * @TX_STATUS_POSTPONE_CALC_TTAK: + * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: + * @TX_STATUS_FAIL_SHORT_LIMIT: + * @TX_STATUS_FAIL_LONG_LIMIT: + * @TX_STATUS_FAIL_UNDERRUN: + * @TX_STATUS_FAIL_DRAIN_FLOW: + * @TX_STATUS_FAIL_RFKILL_FLUSH: + * @TX_STATUS_FAIL_LIFE_EXPIRE: + * @TX_STATUS_FAIL_DEST_PS: + * @TX_STATUS_FAIL_HOST_ABORTED: + * @TX_STATUS_FAIL_BT_RETRY: + * @TX_STATUS_FAIL_STA_INVALID: + * @TX_TATUS_FAIL_FRAG_DROPPED: + * @TX_STATUS_FAIL_TID_DISABLE: + * @TX_STATUS_FAIL_FIFO_FLUSHED: + * @TX_STATUS_FAIL_SMALL_CF_POLL: + * @TX_STATUS_FAIL_FW_DROP: + * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and + * STA table + * @TX_FRAME_STATUS_INTERNAL_ABORT: + * @TX_MODE_MSK: + * @TX_MODE_NO_BURST: + * @TX_MODE_IN_BURST_SEQ: + * @TX_MODE_FIRST_IN_BURST: + * @TX_QUEUE_NUM_MSK: + * + * Valid only if frame_count =1 + * TODO: complete documentation + */ +enum iwl_tx_status { + TX_STATUS_MSK = 0x000000ff, + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + /* postpone TX */ + TX_STATUS_POSTPONE_DELAY = 0x40, + TX_STATUS_POSTPONE_FEW_BYTES = 0x41, + TX_STATUS_POSTPONE_BT_PRIO = 0x42, + TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, + TX_STATUS_POSTPONE_CALC_TTAK = 0x44, + /* abort TX */ + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_UNDERRUN = 0x84, + TX_STATUS_FAIL_DRAIN_FLOW = 0x85, + TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_HOST_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, + TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f, + TX_STATUS_FAIL_FW_DROP = 0x90, + TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91, + TX_STATUS_INTERNAL_ABORT = 0x92, + TX_MODE_MSK = 0x00000f00, + TX_MODE_NO_BURST = 0x00000000, + TX_MODE_IN_BURST_SEQ = 0x00000100, + TX_MODE_FIRST_IN_BURST = 0x00000200, + TX_QUEUE_NUM_MSK = 0x0001f000, + TX_NARROW_BW_MSK = 0x00060000, + TX_NARROW_BW_1DIV2 = 0x00020000, + TX_NARROW_BW_1DIV4 = 0x00040000, + TX_NARROW_BW_1DIV8 = 0x00060000, +}; + +/* + * enum iwl_tx_agg_status - TX aggregation status + * @AGG_TX_STATE_STATUS_MSK: + * @AGG_TX_STATE_TRANSMITTED: + * @AGG_TX_STATE_UNDERRUN: + * @AGG_TX_STATE_BT_PRIO: + * @AGG_TX_STATE_FEW_BYTES: + * @AGG_TX_STATE_ABORT: + * @AGG_TX_STATE_LAST_SENT_TTL: + * @AGG_TX_STATE_LAST_SENT_TRY_CNT: + * @AGG_TX_STATE_LAST_SENT_BT_KILL: + * @AGG_TX_STATE_SCD_QUERY: + * @AGG_TX_STATE_TEST_BAD_CRC32: + * @AGG_TX_STATE_RESPONSE: + * @AGG_TX_STATE_DUMP_TX: 
+ * @AGG_TX_STATE_DELAY_TX: + * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a member of a previous + * aggregation block). If rate scaling is used, retry count indicates the + * rate table entry used for all frames in the new agg. + *@ AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for + * this frame + * + * TODO: complete documentation + */ +enum iwl_tx_agg_status { + AGG_TX_STATE_STATUS_MSK = 0x00fff, + AGG_TX_STATE_TRANSMITTED = 0x000, + AGG_TX_STATE_UNDERRUN = 0x001, + AGG_TX_STATE_BT_PRIO = 0x002, + AGG_TX_STATE_FEW_BYTES = 0x004, + AGG_TX_STATE_ABORT = 0x008, + AGG_TX_STATE_LAST_SENT_TTL = 0x010, + AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, + AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, + AGG_TX_STATE_SCD_QUERY = 0x080, + AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100, + AGG_TX_STATE_RESPONSE = 0x1ff, + AGG_TX_STATE_DUMP_TX = 0x200, + AGG_TX_STATE_DELAY_TX = 0x400, + AGG_TX_STATE_TRY_CNT_POS = 12, + AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, +}; + +#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \ + AGG_TX_STATE_LAST_SENT_TRY_CNT| \ + AGG_TX_STATE_LAST_SENT_BT_KILL) + +/* + * The mask below describes a status where we are absolutely sure that the MPDU + * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've + * written the bytes to the TXE, but we know nothing about what the DSP did. + */ +#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \ + AGG_TX_STATE_ABORT | \ + AGG_TX_STATE_SCD_QUERY) + +/* + * REPLY_TX = 0x1c (response) + * + * This response may be in one of two slightly different formats, indicated + * by the frame_count field: + * + * 1) No aggregation (frame_count == 1). This reports Tx results for a single + * frame. Multiple attempts, at various bit rates, may have been made for + * this frame. + * + * 2) Aggregation (frame_count > 1). This reports Tx results for two or more + * frames that used block-acknowledge. All frames were transmitted at + * same rate. Rate scaling may have been used if first frame in this new + * agg block failed in previous agg block(s). + * + * Note that, for aggregation, ACK (block-ack) status is not delivered + * here; block-ack has not been received by the time the device records + * this status. + * This status relates to reasons the tx might have been blocked or aborted + * within the device, rather than whether it was received successfully by + * the destination station. + */ + +/** + * struct agg_tx_status - per packet TX aggregation status + * @status: enum iwl_tx_agg_status + * @sequence: Sequence # for this frame's Tx cmd (not SSN!) 
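/*
 * Editor's aside (standalone sketch, not the driver's Tx-response handling):
 * decoding the status words defined above.  For a non-aggregated response
 * the low byte of the 32-bit status is compared against TX_STATUS_*; for an
 * aggregated response each 16-bit agg_tx_status entry carries state bits and
 * a retry count.  Constants are copied from the enums above; the sample
 * values are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define TX_STATUS_MSK			0x000000ff
#define TX_STATUS_SUCCESS		0x01
#define TX_STATUS_DIRECT_DONE		0x02

#define AGG_TX_STATE_FEW_BYTES		0x004
#define AGG_TX_STATE_ABORT		0x008
#define AGG_TX_STATE_SCD_QUERY		0x080
#define AGG_TX_STATE_TRY_CNT_POS	12
#define AGG_TX_STATE_TRY_CNT_MSK	(0xf << AGG_TX_STATE_TRY_CNT_POS)
#define AGG_TX_STAT_FRAME_NOT_SENT	(AGG_TX_STATE_FEW_BYTES | \
					 AGG_TX_STATE_ABORT | \
					 AGG_TX_STATE_SCD_QUERY)

int main(void)
{
	uint32_t status = 0x00000001;		/* non-agg: SUCCESS */
	uint16_t agg[2] = { 0x1000, 0x0004 };	/* transmitted w/ try 1; few-bytes */

	printf("non-agg ok: %d\n",
	       (status & TX_STATUS_MSK) == TX_STATUS_SUCCESS ||
	       (status & TX_STATUS_MSK) == TX_STATUS_DIRECT_DONE);

	for (int i = 0; i < 2; i++)
		printf("agg frame %d: not_sent=%d try_cnt=%u\n", i,
		       !!(agg[i] & AGG_TX_STAT_FRAME_NOT_SENT),
		       (unsigned)((agg[i] & AGG_TX_STATE_TRY_CNT_MSK) >>
				  AGG_TX_STATE_TRY_CNT_POS));
	return 0;
}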
+ */ +struct agg_tx_status { + __le16 status; + __le16 sequence; +} __packed; + +/* + * definitions for initial rate index field + * bits [3:0] initial rate index + * bits [6:4] rate table color, used for the initial rate + * bit-7 invalid rate indication + */ +#define TX_RES_INIT_RATE_INDEX_MSK 0x0f +#define TX_RES_RATE_TABLE_COLOR_MSK 0x70 +#define TX_RES_INV_RATE_INDEX_MSK 0x80 + +#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) +#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) + +/** + * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet + * ( REPLY_TX = 0x1c ) + * @frame_count: 1 no aggregation, >1 aggregation + * @bt_kill_count: num of times blocked by bluetooth (unused for agg) + * @failure_rts: num of failures due to unsuccessful RTS + * @failure_frame: num failures due to no ACK (unused for agg) + * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the + * Tx of all the batch. RATE_MCS_* + * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. + * for agg: RTS + CTS + aggregation tx time + block-ack time. + * in usec. + * @pa_status: tx power info + * @pa_integ_res_a: tx power info + * @pa_integ_res_b: tx power info + * @pa_integ_res_c: tx power info + * @measurement_req_id: tx power info + * @tfd_info: TFD information set by the FH + * @seq_ctl: sequence control from the Tx cmd + * @byte_cnt: byte count from the Tx cmd + * @tlc_info: TLC rate info + * @ra_tid: bits [3:0] = ra, bits [7:4] = tid + * @frame_ctrl: frame control + * @status: for non-agg: frame status TX_STATUS_* + * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields + * follow this one, up to frame_count. + * + * After the array of statuses comes the SSN of the SCD. Look at + * %iwl_mvm_get_scd_ssn for more details. + */ +struct iwl_mvm_tx_resp { + u8 frame_count; + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 initial_rate; + __le16 wireless_media_time; + + u8 pa_status; + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_c[3]; + __le16 measurement_req_id; + __le16 reserved; + + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; + __le16 frame_ctrl; + + struct agg_tx_status status; +} __packed; /* TX_RSP_API_S_VER_3 */ + +/** + * struct iwl_mvm_ba_notif - notifies about reception of BA + * ( BA_NOTIF = 0xc5 ) + * @sta_addr_lo32: lower 32 bits of the MAC address + * @sta_addr_hi16: upper 16 bits of the MAC address + * @sta_id: Index of recipient (BA-sending) station in fw's station table + * @tid: tid of the session + * @seq_ctl: + * @bitmap: the bitmap of the BA notification as seen in the air + * @scd_flow: the tx queue this BA relates to + * @scd_ssn: the index of the last contiguously sent packet + * @txed: number of Txed frames in this batch + * @txed_2_done: number of Acked frames in this batch + */ +struct iwl_mvm_ba_notif { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; + u8 txed; + u8 txed_2_done; + __le16 reserved1; +} __packed; + +/* + * struct iwl_mac_beacon_cmd - beacon template command + * @tx: the tx commands associated with the beacon frame + * @template_id: currently equal to the mac context id of the coresponding + * mac. 
+ * @tim_idx: the offset of the tim IE in the beacon + * @tim_size: the length of the tim IE + * @frame: the template of the beacon frame + */ +struct iwl_mac_beacon_cmd { + struct iwl_tx_cmd tx; + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + struct ieee80211_hdr frame[0]; +} __packed; + +/** + * enum iwl_dump_control - dump (flush) control flags + * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty + * and the TFD queues are empty. + */ +enum iwl_dump_control { + DUMP_TX_FIFO_FLUSH = BIT(1), +}; + +/** + * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command + * @queues_ctl: bitmap of queues to flush + * @flush_ctl: control flags + * @reserved: reserved + */ +struct iwl_tx_path_flush_cmd { + __le32 queues_ctl; + __le16 flush_ctl; + __le16 reserved; +} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ + +/** + * iwl_mvm_get_scd_ssn - returns the SSN of the SCD + * @tx_resp: the Tx response from the fw (agg or non-agg) + * + * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since + * it can't know that everything will go well until the end of the AMPDU, it + * can't know in advance the number of MPDUs that will be sent in the current + * batch. This is why it writes the agg Tx response while it fetches the MPDUs. + * Hence, it can't know in advance what the SSN of the SCD will be at the end + * of the batch. This is why the SSN of the SCD is written at the end of the + * whole struct at a variable offset. This function knows how to cope with the + * variable offset and returns the SSN of the SCD. + */ +static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp) +{ + return le32_to_cpup((__le32 *)&tx_resp->status + + tx_resp->frame_count) & 0xfff; +} + +#endif /* __fw_api_tx_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h new file mode 100644 index 000000000000..23eebda848b0 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -0,0 +1,952 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. 
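/*
 * Editor's aside (illustration only, not driver code): filling a host-endian
 * mirror of iwl_tx_path_flush_cmd, shown above, to flush two Tx queues and
 * drain their FIFOs.  The real command uses little-endian fields and goes
 * through the host-command machinery; the queue numbers here are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1U << (n))
#define DUMP_TX_FIFO_FLUSH	BIT(1)

struct tx_path_flush {		/* mirrors iwl_tx_path_flush_cmd */
	uint32_t queues_ctl;	/* bitmap of queues to flush */
	uint16_t flush_ctl;	/* control flags */
	uint16_t reserved;
};

int main(void)
{
	struct tx_path_flush cmd = {
		.queues_ctl = BIT(16) | BIT(17),	/* example queue bitmap */
		.flush_ctl  = DUMP_TX_FIFO_FLUSH,
	};

	printf("queues_ctl=0x%08x flush_ctl=0x%04x\n",
	       (unsigned)cmd.queues_ctl, (unsigned)cmd.flush_ctl);
	return 0;
}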
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __fw_api_h__ +#define __fw_api_h__ + +#include "fw-api-rs.h" +#include "fw-api-tx.h" +#include "fw-api-sta.h" +#include "fw-api-mac.h" +#include "fw-api-power.h" +#include "fw-api-d3.h" + +/* queue and FIFO numbers by usage */ +enum { + IWL_MVM_OFFCHANNEL_QUEUE = 8, + IWL_MVM_CMD_QUEUE = 9, + IWL_MVM_AUX_QUEUE = 15, + IWL_MVM_FIRST_AGG_QUEUE = 16, + IWL_MVM_NUM_QUEUES = 20, + IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1, + IWL_MVM_CMD_FIFO = 7 +}; + +#define IWL_MVM_STATION_COUNT 16 + +/* commands */ +enum { + MVM_ALIVE = 0x1, + REPLY_ERROR = 0x2, + + INIT_COMPLETE_NOTIF = 0x4, + + /* PHY context commands */ + PHY_CONTEXT_CMD = 0x8, + DBG_CFG = 0x9, + + /* station table */ + ADD_STA = 0x18, + REMOVE_STA = 0x19, + + /* TX */ + TX_CMD = 0x1c, + TXPATH_FLUSH = 0x1e, + MGMT_MCAST_KEY = 0x1f, + + /* global key */ + WEP_KEY = 0x20, + + /* MAC and Binding commands */ + MAC_CONTEXT_CMD = 0x28, + TIME_EVENT_CMD = 0x29, /* both CMD and response */ + TIME_EVENT_NOTIFICATION = 0x2a, + BINDING_CONTEXT_CMD = 0x2b, + TIME_QUOTA_CMD = 0x2c, + + LQ_CMD = 0x4e, + + /* Calibration */ + TEMPERATURE_NOTIFICATION = 0x62, + CALIBRATION_CFG_CMD = 0x65, + CALIBRATION_RES_NOTIFICATION = 0x66, + CALIBRATION_COMPLETE_NOTIFICATION = 0x67, + RADIO_VERSION_NOTIFICATION = 0x68, + + /* Scan offload */ + SCAN_OFFLOAD_REQUEST_CMD = 0x51, + SCAN_OFFLOAD_ABORT_CMD = 0x52, + SCAN_OFFLOAD_COMPLETE = 0x6D, + SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, + SCAN_OFFLOAD_CONFIG_CMD = 0x6f, + + /* Phy */ + PHY_CONFIGURATION_CMD = 0x6a, + CALIB_RES_NOTIF_PHY_DB = 0x6b, + /* PHY_DB_CMD = 0x6c, */ + + /* Power */ + POWER_TABLE_CMD = 0x77, + + /* Scanning */ + SCAN_REQUEST_CMD = 0x80, + SCAN_ABORT_CMD = 0x81, + SCAN_START_NOTIFICATION = 0x82, + SCAN_RESULTS_NOTIFICATION = 0x83, + SCAN_COMPLETE_NOTIFICATION = 0x84, + + /* NVM */ + NVM_ACCESS_CMD = 0x88, + + SET_CALIB_DEFAULT_CMD = 0x8e, + + BEACON_TEMPLATE_CMD = 0x91, + TX_ANT_CONFIGURATION_CMD = 0x98, + 
STATISTICS_NOTIFICATION = 0x9d, + + /* RF-KILL commands and notifications */ + CARD_STATE_CMD = 0xa0, + CARD_STATE_NOTIFICATION = 0xa1, + + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + BA_NOTIF = 0xc5, + + REPLY_DEBUG_CMD = 0xf0, + DEBUG_LOG_MSG = 0xf7, + + /* D3 commands/notifications */ + D3_CONFIG_CMD = 0xd3, + PROT_OFFLOAD_CONFIG_CMD = 0xd4, + OFFLOADS_QUERY_CMD = 0xd5, + REMOTE_WAKE_CONFIG_CMD = 0xd6, + + /* for WoWLAN in particular */ + WOWLAN_PATTERNS = 0xe0, + WOWLAN_CONFIGURATION = 0xe1, + WOWLAN_TSC_RSC_PARAM = 0xe2, + WOWLAN_TKIP_PARAM = 0xe3, + WOWLAN_KEK_KCK_MATERIAL = 0xe4, + WOWLAN_GET_STATUSES = 0xe5, + WOWLAN_TX_POWER_PER_DB = 0xe6, + + /* and for NetDetect */ + NET_DETECT_CONFIG_CMD = 0x54, + NET_DETECT_PROFILES_QUERY_CMD = 0x56, + NET_DETECT_PROFILES_CMD = 0x57, + NET_DETECT_HOTSPOTS_CMD = 0x58, + NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59, + + REPLY_MAX = 0xff, +}; + +/** + * struct iwl_cmd_response - generic response struct for most commands + * @status: status of the command asked, changes for each one + */ +struct iwl_cmd_response { + __le32 status; +}; + +/* + * struct iwl_tx_ant_cfg_cmd + * @valid: valid antenna configuration + */ +struct iwl_tx_ant_cfg_cmd { + __le32 valid; +} __packed; + +/* + * Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers. + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers. + */ +struct iwl_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + +/* This enum defines the bitmap of various calibrations to enable in both + * init ucode and runtime ucode through CALIBRATION_CFG_CMD. + */ +enum iwl_calib_cfg { + IWL_CALIB_CFG_XTAL_IDX = BIT(0), + IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), + IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), + IWL_CALIB_CFG_PAPD_IDX = BIT(3), + IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), + IWL_CALIB_CFG_DC_IDX = BIT(5), + IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), + IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), + IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), + IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), + IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), + IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), + IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), + IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), + IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), + IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), + IWL_CALIB_CFG_DAC_IDX = BIT(16), + IWL_CALIB_CFG_ABS_IDX = BIT(17), + IWL_CALIB_CFG_AGC_IDX = BIT(18), +}; + +/* + * Phy configuration command. + */ +struct iwl_phy_cfg_cmd { + __le32 phy_cfg; + struct iwl_calib_ctrl calib_control; +} __packed; + +#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) +#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) +#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) +#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) +#define PHY_CFG_TX_CHAIN_A BIT(8) +#define PHY_CFG_TX_CHAIN_B BIT(9) +#define PHY_CFG_TX_CHAIN_C BIT(10) +#define PHY_CFG_RX_CHAIN_A BIT(12) +#define PHY_CFG_RX_CHAIN_B BIT(13) +#define PHY_CFG_RX_CHAIN_C BIT(14) + + +/* Target of the NVM_ACCESS_CMD */ +enum { + NVM_ACCESS_TARGET_CACHE = 0, + NVM_ACCESS_TARGET_OTP = 1, + NVM_ACCESS_TARGET_EEPROM = 2, +}; + +/** + * struct iwl_nvm_access_cmd_ver1 - Request the device to send the NVM. + * @op_code: 0 - read, 1 - write. + * @target: NVM_ACCESS_TARGET_*. should be 0 for read. + * @cache_refresh: 0 - None, 1- NVM. + * @offset: offset in the nvm data. + * @length: of the chunk. 
+ * @data: empty on read, the NVM chunk on write + */ +struct iwl_nvm_access_cmd_ver1 { + u8 op_code; + u8 target; + u8 cache_refresh; + u8 reserved; + __le16 offset; + __le16 length; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_API_S_VER_1 */ + +/** + * struct iwl_nvm_access_resp_ver1 - response to NVM_ACCESS_CMD + * @offset: the offset in the nvm data + * @length: of the chunk + * @data: the nvm chunk on when NVM_ACCESS_CMD was read, nothing on write + */ +struct iwl_nvm_access_resp_ver1 { + __le16 offset; + __le16 length; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_1 */ + +/* Section types for NVM_ACCESS_CMD version 2 */ +enum { + NVM_SECTION_TYPE_HW = 0, + NVM_SECTION_TYPE_SW, + NVM_SECTION_TYPE_PAPD, + NVM_SECTION_TYPE_BT, + NVM_SECTION_TYPE_CALIBRATION, + NVM_SECTION_TYPE_PRODUCTION, + NVM_SECTION_TYPE_POST_FCS_CALIB, + NVM_NUM_OF_SECTIONS, +}; + +/** + * struct iwl_nvm_access_cmd_ver2 - Request the device to send an NVM section + * @op_code: 0 - read, 1 - write + * @target: NVM_ACCESS_TARGET_* + * @type: NVM_SECTION_TYPE_* + * @offset: offset in bytes into the section + * @length: in bytes, to read/write + * @data: if write operation, the data to write. On read its empty + */ +struct iwl_nvm_access_cmd_ver2 { + u8 op_code; + u8 target; + __le16 type; + __le16 offset; + __le16 length; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ + +/** + * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD + * @offset: offset in bytes into the section + * @length: in bytes, either how much was written or read + * @type: NVM_SECTION_TYPE_* + * @status: 0 for success, fail otherwise + * @data: if read operation, the data returned. Empty on write. + */ +struct iwl_nvm_access_resp_ver2 { + __le16 offset; + __le16 length; + __le16 type; + __le16 status; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ + +/* MVM_ALIVE 0x1 */ + +/* alive response is_valid values */ +#define ALIVE_RESP_UCODE_OK BIT(0) +#define ALIVE_RESP_RFKILL BIT(1) + +/* alive response ver_type values */ +enum { + FW_TYPE_HW = 0, + FW_TYPE_PROT = 1, + FW_TYPE_AP = 2, + FW_TYPE_WOWLAN = 3, + FW_TYPE_TIMING = 4, + FW_TYPE_WIPAN = 5 +}; + +/* alive response ver_subtype values */ +enum { + FW_SUBTYPE_FULL_FEATURE = 0, + FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ + FW_SUBTYPE_REDUCED = 2, + FW_SUBTYPE_ALIVE_ONLY = 3, + FW_SUBTYPE_WOWLAN = 4, + FW_SUBTYPE_AP_SUBTYPE = 5, + FW_SUBTYPE_WIPAN = 6, + FW_SUBTYPE_INITIALIZE = 9 +}; + +#define IWL_ALIVE_STATUS_ERR 0xDEAD +#define IWL_ALIVE_STATUS_OK 0xCAFE + +#define IWL_ALIVE_FLG_RFKILL BIT(0) + +struct mvm_alive_resp { + __le16 status; + __le16 flags; + u8 ucode_minor; + u8 ucode_major; + __le16 id; + u8 api_minor; + u8 api_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le16 reserved2; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 log_event_table_ptr; /* SRAM address for event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ +} __packed; /* ALIVE_RES_API_S_VER_1 */ + +/* Error response/notification */ +enum { + FW_ERR_UNKNOWN_CMD = 0x0, + FW_ERR_INVALID_CMD_PARAM = 0x1, + FW_ERR_SERVICE = 0x2, + FW_ERR_ARC_MEMORY = 0x3, + FW_ERR_ARC_CODE = 0x4, + FW_ERR_WATCH_DOG = 0x5, + FW_ERR_WEP_GRP_KEY_INDX = 0x10, + FW_ERR_WEP_KEY_SIZE = 0x11, + FW_ERR_OBSOLETE_FUNC = 0x12, + FW_ERR_UNEXPECTED = 0xFE, + FW_ERR_FATAL = 0xFF +}; + +/** + * struct iwl_error_resp - FW error indication + * ( REPLY_ERROR = 0x2 ) + 
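/*
 * Editor's aside (illustration only, not driver code): a host-endian mirror
 * of the version-2 NVM read request described above, asking for the first
 * 2K of the SW section from the cache target.  The real command uses
 * little-endian fields and a flexible data[] tail; the offset and length
 * are arbitrary example values.
 */
#include <stdint.h>
#include <stdio.h>

#define NVM_ACCESS_TARGET_CACHE	0	/* from the target enum above */
#define NVM_SECTION_TYPE_SW	1	/* from the section-type enum above */

struct nvm_access_read {	/* mirrors iwl_nvm_access_cmd_ver2, minus data[] */
	uint8_t op_code;	/* 0 - read, 1 - write */
	uint8_t target;		/* NVM_ACCESS_TARGET_* */
	uint16_t type;		/* NVM_SECTION_TYPE_* */
	uint16_t offset;
	uint16_t length;
};

int main(void)
{
	struct nvm_access_read cmd = {
		.op_code = 0,				/* read */
		.target  = NVM_ACCESS_TARGET_CACHE,
		.type    = NVM_SECTION_TYPE_SW,
		.offset  = 0,
		.length  = 2048,
	};

	printf("read %u bytes of section %u at offset %u\n",
	       (unsigned)cmd.length, (unsigned)cmd.type, (unsigned)cmd.offset);
	return 0;
}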
* @error_type: one of FW_ERR_* + * @cmd_id: the command ID for which the error occured + * @bad_cmd_seq_num: sequence number of the erroneous command + * @error_service: which service created the error, applicable only if + * error_type = 2, otherwise 0 + * @timestamp: TSF in usecs. + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_service; + __le64 timestamp; +} __packed; + + +/* Common PHY, MAC and Bindings definitions */ + +#define MAX_MACS_IN_BINDING (3) +#define MAX_BINDINGS (4) +#define AUX_BINDING_INDEX (3) +#define MAX_PHYS (4) + +/* Used to extract ID and color from the context dword */ +#define FW_CTXT_ID_POS (0) +#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS) +#define FW_CTXT_COLOR_POS (8) +#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS) +#define FW_CTXT_INVALID (0xffffffff) + +#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\ + (_color << FW_CTXT_COLOR_POS)) + +/* Possible actions on PHYs, MACs and Bindings */ +enum { + FW_CTXT_ACTION_STUB = 0, + FW_CTXT_ACTION_ADD, + FW_CTXT_ACTION_MODIFY, + FW_CTXT_ACTION_REMOVE, + FW_CTXT_ACTION_NUM +}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ + +/* Time Events */ + +/* Time Event types, according to MAC type */ +enum iwl_time_event_type { + /* BSS Station Events */ + TE_BSS_STA_AGGRESSIVE_ASSOC, + TE_BSS_STA_ASSOC, + TE_BSS_EAP_DHCP_PROT, + TE_BSS_QUIET_PERIOD, + + /* P2P Device Events */ + TE_P2P_DEVICE_DISCOVERABLE, + TE_P2P_DEVICE_LISTEN, + TE_P2P_DEVICE_ACTION_SCAN, + TE_P2P_DEVICE_FULL_SCAN, + + /* P2P Client Events */ + TE_P2P_CLIENT_AGGRESSIVE_ASSOC, + TE_P2P_CLIENT_ASSOC, + TE_P2P_CLIENT_QUIET_PERIOD, + + /* P2P GO Events */ + TE_P2P_GO_ASSOC_PROT, + TE_P2P_GO_REPETITIVE_NOA, + TE_P2P_GO_CT_WINDOW, + + /* WiDi Sync Events */ + TE_WIDI_TX_SYNC, + + TE_MAX +}; /* MAC_EVENT_TYPE_API_E_VER_1 */ + +/* Time Event dependencies: none, on another TE, or in a specific time */ +enum { + TE_INDEPENDENT = 0, + TE_DEP_OTHER = 1, + TE_DEP_TSF = 2, + TE_EVENT_SOCIOPATHIC = 4, +}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ + +/* When to send Time Event notifications and to whom (internal = FW) */ +enum { + TE_NOTIF_NONE = 0, + TE_NOTIF_HOST_START = 0x1, + TE_NOTIF_HOST_END = 0x2, + TE_NOTIF_INTERNAL_START = 0x4, + TE_NOTIF_INTERNAL_END = 0x8 +}; /* MAC_EVENT_ACTION_API_E_VER_1 */ + +/* + * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number + * of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. 
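/*
 * Editor's aside (standalone sketch, not driver code): the generic
 * id-and-colour word shared by the MAC, PHY and binding commands above.
 * FW_CMD_ID_AND_COLOR packs 8 bits of ID and 8 bits of colour, and the two
 * masks recover them; the macros are copied from the header, the sample
 * ID/colour values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define FW_CTXT_ID_POS		(0)
#define FW_CTXT_ID_MSK		(0xff << FW_CTXT_ID_POS)
#define FW_CTXT_COLOR_POS	(8)
#define FW_CTXT_COLOR_MSK	(0xff << FW_CTXT_COLOR_POS)
#define FW_CMD_ID_AND_COLOR(_id, _color)	((_id << FW_CTXT_ID_POS) |\
						 (_color << FW_CTXT_COLOR_POS))

int main(void)
{
	uint32_t v = FW_CMD_ID_AND_COLOR(2, 1);		/* id 2, colour 1 */

	printf("id_and_color=0x%08x id=%u color=%u\n", (unsigned)v,
	       (unsigned)((v & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS),
	       (unsigned)((v & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS));
	return 0;
}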
+ */ +enum { + TE_FRAG_NONE = 0, + TE_FRAG_SINGLE = 1, + TE_FRAG_DUAL = 2, + TE_FRAG_ENDLESS = 0xffffffff +}; + +/* Repeat the time event endlessly (until removed) */ +#define TE_REPEAT_ENDLESS (0xffffffff) +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_REPEAT_MAX_MSK (0x0fffffff) +/* If a Time Event can be fragmented, this is the max number of fragments */ +#define TE_FRAG_MAX_MSK (0x0fffffff) + +/** + * struct iwl_time_event_cmd - configuring Time Events + * ( TIME_EVENT_CMD = 0x29 ) + * @id_and_color: ID and color of the relevant MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @id: this field has two meanings, depending on the action: + * If the action is ADD, then it means the type of event to add. + * For all other actions it is the unique event ID assigned when the + * event was added by the FW. + * @apply_time: When to start the Time Event (in GP2) + * @max_delay: maximum delay to event's start (apply time), in TU + * @depends_on: the unique ID of the event we depend on (if any) + * @interval: interval between repetitions, in TU + * @interval_reciprocal: 2^32 / interval + * @duration: duration of event in TU + * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS + * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF + * @is_present: 0 or 1, are we present or absent during the Time Event + * @max_frags: maximal number of fragments the Time Event can be divided to + * @notify: notifications using TE_NOTIF_* (whom to notify when) + */ +struct iwl_time_event_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + __le32 id; + /* MAC_TIME_EVENT_DATA_API_S_VER_1 */ + __le32 apply_time; + __le32 max_delay; + __le32 dep_policy; + __le32 depends_on; + __le32 is_present; + __le32 max_frags; + __le32 interval; + __le32 interval_reciprocal; + __le32 duration; + __le32 repeat; + __le32 notify; +} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */ + +/** + * struct iwl_time_event_resp - response structure to iwl_time_event_cmd + * @status: bit 0 indicates success, all others specify errors + * @id: the Time Event type + * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE + * @id_and_color: ID and color of the relevant MAC + */ +struct iwl_time_event_resp { + __le32 status; + __le32 id; + __le32 unique_id; + __le32 id_and_color; +} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ + +/** + * struct iwl_time_event_notif - notifications of time event start/stop + * ( TIME_EVENT_NOTIFICATION = 0x2a ) + * @timestamp: action timestamp in GP2 + * @session_id: session's unique id + * @unique_id: unique id of the Time Event itself + * @id_and_color: ID and color of the relevant MAC + * @action: one of TE_NOTIF_START or TE_NOTIF_END + * @status: true if scheduled, false otherwise (not executed) + */ +struct iwl_time_event_notif { + __le32 timestamp; + __le32 session_id; + __le32 unique_id; + __le32 id_and_color; + __le32 action; + __le32 status; +} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ + + +/* Bindings and Time Quota */ + +/** + * struct iwl_binding_cmd - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding + * @phy: PHY id and color which belongs to the binding + */ +struct iwl_binding_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ 
+ __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; +} __packed; /* BINDING_CMD_API_S_VER_1 */ + +/* The maximal number of fragments in the FW's schedule session */ +#define IWL_MVM_MAX_QUOTA 128 + +/** + * struct iwl_time_quota_data - configuration of time quota per binding + * @id_and_color: ID and color of the relevant Binding + * @quota: absolute time quota in TU. The scheduler will try to divide the + * remainig quota (after Time Events) according to this quota. + * @max_duration: max uninterrupted context duration in TU + */ +struct iwl_time_quota_data { + __le32 id_and_color; + __le32 quota; + __le32 max_duration; +} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ + +/** + * struct iwl_time_quota_cmd - configuration of time quota between bindings + * ( TIME_QUOTA_CMD = 0x2c ) + * @quotas: allocations per binding + */ +struct iwl_time_quota_cmd { + struct iwl_time_quota_data quotas[MAX_BINDINGS]; +} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ + + +/* PHY context */ + +/* Supported bands */ +#define PHY_BAND_5 (0) +#define PHY_BAND_24 (1) + +/* Supported channel width, vary if there is VHT support */ +#define PHY_VHT_CHANNEL_MODE20 (0x0) +#define PHY_VHT_CHANNEL_MODE40 (0x1) +#define PHY_VHT_CHANNEL_MODE80 (0x2) +#define PHY_VHT_CHANNEL_MODE160 (0x3) + +/* + * Control channel position: + * For legacy set bit means upper channel, otherwise lower. + * For VHT - bit-2 marks if the control is lower/upper relative to center-freq + * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. + * center_freq + * | + * 40Mhz |_______|_______| + * 80Mhz |_______|_______|_______|_______| + * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| + * code 011 010 001 000 | 100 101 110 111 + */ +#define PHY_VHT_CTRL_POS_1_BELOW (0x0) +#define PHY_VHT_CTRL_POS_2_BELOW (0x1) +#define PHY_VHT_CTRL_POS_3_BELOW (0x2) +#define PHY_VHT_CTRL_POS_4_BELOW (0x3) +#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) +#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) +#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) +#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) + +/* + * @band: PHY_BAND_* + * @channel: channel number + * @width: PHY_[VHT|LEGACY]_CHANNEL_* + * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* + */ +struct iwl_fw_channel_info { + u8 band; + u8 channel; + u8 width; + u8 ctrl_pos; +} __packed; + +#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) +#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) +#define PHY_RX_CHAIN_VALID_POS (1) +#define PHY_RX_CHAIN_VALID_MSK \ + (0x7 << PHY_RX_CHAIN_VALID_POS) +#define PHY_RX_CHAIN_FORCE_SEL_POS (4) +#define PHY_RX_CHAIN_FORCE_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) +#define PHY_RX_CHAIN_CNT_POS (10) +#define PHY_RX_CHAIN_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_CNT_POS) +#define PHY_RX_CHAIN_MIMO_CNT_POS (12) +#define PHY_RX_CHAIN_MIMO_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) +#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) +#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) + +/* TODO: fix the value, make it depend on firmware at runtime? */ +#define NUM_PHY_CTX 3 + +/* TODO: complete missing documentation */ +/** + * struct iwl_phy_context_cmd - config of the PHY context + * ( PHY_CONTEXT_CMD = 0x8 ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @apply_time: 0 means immediate apply and context switch. 
+ * other value means apply new params after X usecs + * @tx_param_color: ??? + * @channel_info: + * @txchain_info: ??? + * @rxchain_info: ??? + * @acquisition_data: ??? + * @dsp_cfg_flags: set to 0 + */ +struct iwl_phy_context_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* PHY_CONTEXT_DATA_API_S_VER_1 */ + __le32 apply_time; + __le32 tx_param_color; + struct iwl_fw_channel_info ci; + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ + +#define IWL_RX_INFO_PHY_CNT 8 +#define IWL_RX_INFO_AGC_IDX 1 +#define IWL_RX_INFO_RSSI_AB_IDX 2 +#define IWL_RX_INFO_RSSI_C_IDX 3 +#define IWL_OFDM_AGC_DB_MSK 0xfe00 +#define IWL_OFDM_AGC_DB_POS 9 +#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff +#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00 +#define IWL_OFDM_RSSI_A_POS 0 +#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 +#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000 +#define IWL_OFDM_RSSI_B_POS 16 +#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff +#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00 +#define IWL_OFDM_RSSI_C_POS 0 + +/** + * struct iwl_rx_phy_info - phy info + * (REPLY_RX_PHY_CMD = 0xc0) + * @non_cfg_phy_cnt: non configurable DSP phy data byte count + * @cfg_phy_cnt: configurable DSP phy data byte count + * @stat_id: configurable DSP phy data set ID + * @reserved1: + * @system_timestamp: GP2 at on air rise + * @timestamp: TSF at on air rise + * @beacon_time_stamp: beacon at on-air rise + * @phy_flags: general phy flags: band, modulation, ... + * @channel: channel number + * @non_cfg_phy_buf: for various implementations of non_cfg_phy + * @rate_n_flags: RATE_MCS_* + * @byte_count: frame's byte-count + * @frame_time: frame's time on the air, based on byte count and frame rate + * calculation + * + * Before each Rx, the device sends this data. It contains PHY information + * about the reception of the packet. 
+ */ +struct iwl_rx_phy_info { + u8 non_cfg_phy_cnt; + u8 cfg_phy_cnt; + u8 stat_id; + u8 reserved1; + __le32 system_timestamp; + __le64 timestamp; + __le32 beacon_time_stamp; + __le16 phy_flags; + __le16 channel; + __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; + __le32 rate_n_flags; + __le32 byte_count; + __le16 reserved2; + __le16 frame_time; +} __packed; + +struct iwl_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +} __packed; + +/** + * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags + * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band + * @RX_RES_PHY_FLAGS_MOD_CCK: + * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short + * @RX_RES_PHY_FLAGS_NARROW_BAND: + * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received + * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU + * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame + * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble + * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame + */ +enum iwl_rx_phy_flags { + RX_RES_PHY_FLAGS_BAND_24 = BIT(0), + RX_RES_PHY_FLAGS_MOD_CCK = BIT(1), + RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2), + RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3), + RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4), + RX_RES_PHY_FLAGS_ANTENNA_POS = 4, + RX_RES_PHY_FLAGS_AGG = BIT(7), + RX_RES_PHY_FLAGS_OFDM_HT = BIT(8), + RX_RES_PHY_FLAGS_OFDM_GF = BIT(9), + RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10), +}; + +/** + * enum iwl_mvm_rx_status - written by fw for each Rx packet + * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine + * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow + * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: + * @RX_MPDU_RES_STATUS_KEY_VALID: + * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: + * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed + * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked + * in the driver. + * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine + * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or + * alg = CCM only. Checks replay attack for 11w frames. Relevant only if + * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set. 
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @RX_MPDU_RES_STATUS_RRF_KILL:
+ * @RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+enum iwl_mvm_rx_status {
+	RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
+	RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
+	RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
+	RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
+	RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
+	RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
+	RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
+	RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+	RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
+	RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
+	RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
+	RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
+	RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+	RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
+	RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
+	RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
+	RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
+	RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12),
+	RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
+	RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
+	RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
+	RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
+	RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
+	RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
+	RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
+	RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
+};
+
+/**
+ * struct iwl_radio_version_notif - information on the radio version
+ * ( RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor:
+ * @radio_step:
+ * @radio_dash:
+ */
+struct iwl_radio_version_notif {
+	__le32 radio_flavor;
+	__le32 radio_step;
+	__le32 radio_dash;
+} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+enum iwl_card_state_flags {
+	CARD_ENABLED = 0x00,
+	HW_CARD_DISABLED = 0x01,
+	SW_CARD_DISABLED = 0x02,
+	CT_KILL_CARD_DISABLED = 0x04,
+	HALT_CARD_DISABLED = 0x08,
+	CARD_DISABLED_MSK = 0x0f,
+	CARD_IS_RX_ON = 0x10,
+};
+
+/**
+ * struct iwl_card_state_notif - card state notification
+ * ( CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwl_card_state_flags
+ */
+struct iwl_card_state_notif {
+	__le32 flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
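As an aside, a minimal sketch (not part of the patch) of how the RX_MPDU_RES_STATUS_* bits above could be combined to check that an MPDU passed the basic sanity checks and was decrypted with CCMP. The helper name is invented for the example; the real driver logic may differ.

static inline bool example_rx_ccmp_ok(u32 rx_pkt_status)
{
	/* the frame must have a good CRC and no RXE overrun */
	if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK))
		return false;

	/* the encryption algorithm is a 3-bit field, not a single flag */
	if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
	    RX_MPDU_RES_STATUS_SEC_CCM_ENC)
		return false;

	/* for CCM both the MIC check and the decryption must have completed */
	return (rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK) &&
	       (rx_pkt_status & RX_MPDU_RES_STATUS_DEC_DONE);
}

+/**
+ * struct iwl_set_calib_default_cmd - set default value for calibration.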
+ * ( SET_CALIB_DEFAULT_CMD = 0x8e ) + * @calib_index: the calibration to set value for + * @length: of data + * @data: the value to set for the calibration result + */ +struct iwl_set_calib_default_cmd { + __le16 calib_index; + __le16 length; + u8 data[0]; +} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ + +#endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c new file mode 100644 index 000000000000..d3d959db03a9 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -0,0 +1,640 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include <net/mac80211.h> + +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "iwl-fw.h" +#include "iwl-debug.h" +#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ +#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ +#include "iwl-eeprom-parse.h" + +#include "mvm.h" +#include "iwl-phy-db.h" + +#define MVM_UCODE_ALIVE_TIMEOUT HZ +#define MVM_UCODE_CALIB_TIMEOUT (2*HZ) + +#define UCODE_VALID_OK cpu_to_le32(0x1) + +/* Default calibration values for WkP - set to INIT image w/o running */ +static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f, + 0x00, 0x18, 0x00 }; +static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f }; +static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 }; +static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00, + 0x00 }; +static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 }; +static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; +static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; +static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 }; + +struct iwl_calib_default_data { + u16 size; + void *data; +}; + +#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} + +static const struct iwl_calib_default_data wkp_calib_default_data[12] = { + [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc), + [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter), + [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo), + [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq), + [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), + [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq), + [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), +}; + +struct iwl_mvm_alive_data { + bool valid; + u32 scd_base_addr; +}; + +static inline const struct fw_img * +iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) +{ + if (ucode_type >= IWL_UCODE_TYPE_MAX) + return NULL; + + return &mvm->fw->img[ucode_type]; +} + +static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) +{ + struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { + .valid = cpu_to_le32(valid_tx_ant), + }; + + IWL_DEBUG_HC(mvm, "select valid tx ant: %u\n", valid_tx_ant); + return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC, + sizeof(tx_ant_cmd), &tx_ant_cmd); +} + +static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_alive_data *alive_data = data; + struct mvm_alive_resp *palive; + + palive = (void *)pkt->data; + + mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr); + mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr); + alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); + + alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK; + IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", + le16_to_cpu(palive->status), palive->ver_type, + palive->ver_subtype); + + return true; +} + +static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_phy_db *phy_db = data; + + if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { + WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); + return true; + } + + WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC)); + + return 
false; +} + +static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, + enum iwl_ucode_type ucode_type) +{ + struct iwl_notification_wait alive_wait; + struct iwl_mvm_alive_data alive_data; + const struct fw_img *fw; + int ret, i; + enum iwl_ucode_type old_type = mvm->cur_ucode; + static const u8 alive_cmd[] = { MVM_ALIVE }; + + mvm->cur_ucode = ucode_type; + fw = iwl_get_ucode_image(mvm, ucode_type); + + mvm->ucode_loaded = false; + + if (!fw) + return -EINVAL; + + iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, + alive_cmd, ARRAY_SIZE(alive_cmd), + iwl_alive_fn, &alive_data); + + ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); + if (ret) { + mvm->cur_ucode = old_type; + iwl_remove_notification(&mvm->notif_wait, &alive_wait); + return ret; + } + + /* + * Some things may run in the background now, but we + * just wait for the ALIVE notification here. + */ + ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, + MVM_UCODE_ALIVE_TIMEOUT); + if (ret) { + mvm->cur_ucode = old_type; + return ret; + } + + if (!alive_data.valid) { + IWL_ERR(mvm, "Loaded ucode is not valid!\n"); + mvm->cur_ucode = old_type; + return -EIO; + } + + iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); + + /* + * Note: all the queues are enabled as part of the interface + * initialization, but in firmware restart scenarios they + * could be stopped, so wake them up. In firmware restart, + * mac80211 will have the queues stopped as well until the + * reconfiguration completes. During normal startup, they + * will be empty. + */ + + for (i = 0; i < IWL_MAX_HW_QUEUES; i++) { + if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE) + mvm->queue_to_mac80211[i] = i; + else + mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; + atomic_set(&mvm->queue_stop_count[i], 0); + } + + mvm->transport_queue_stop = 0; + + mvm->ucode_loaded = true; + + return 0; +} +#define IWL_HW_REV_ID_RAINBOW 0x2 +#define IWL_PROJ_TYPE_LHP 0x5 + +static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm) +{ + struct iwl_nvm_data *data = mvm->nvm_data; + /* Temp calls to static definitions, will be changed to CSR calls */ + u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW; + u8 project_type = IWL_PROJ_TYPE_LHP; + + return data->radio_cfg_dash | (data->radio_cfg_step << 2) | + (hw_rev_id << 4) | ((project_type & 0x7f) << 6) | + (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20); +} + +static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) +{ + struct iwl_phy_cfg_cmd phy_cfg_cmd; + enum iwl_ucode_type ucode_type = mvm->cur_ucode; + + /* Set parameters */ + phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm)); + phy_cfg_cmd.calib_control.event_trigger = + mvm->fw->default_calib[ucode_type].event_trigger; + phy_cfg_cmd.calib_control.flow_trigger = + mvm->fw->default_calib[ucode_type].flow_trigger; + + IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", + phy_cfg_cmd.phy_cfg); + + return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC, + sizeof(phy_cfg_cmd), &phy_cfg_cmd); +} + +/* Starting with the new PHY DB implementation - New calibs are enabled */ +/* Value - 0x405e7 */ +#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\ + IWL_CALIB_CFG_TEMPERATURE_IDX |\ + IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ + IWL_CALIB_CFG_DC_IDX |\ + IWL_CALIB_CFG_BB_FILTER_IDX |\ + IWL_CALIB_CFG_LO_LEAKAGE_IDX |\ + IWL_CALIB_CFG_TX_IQ_IDX |\ + IWL_CALIB_CFG_RX_IQ_IDX |\ + IWL_CALIB_CFG_AGC_IDX) + +#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0 + +/* Value 0x41567 */ +#define IWL_CALIB_DEFAULT_FLOW_RUN 
(IWL_CALIB_CFG_XTAL_IDX |\ + IWL_CALIB_CFG_TEMPERATURE_IDX |\ + IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ + IWL_CALIB_CFG_BB_FILTER_IDX |\ + IWL_CALIB_CFG_DC_IDX |\ + IWL_CALIB_CFG_TX_IQ_IDX |\ + IWL_CALIB_CFG_RX_IQ_IDX |\ + IWL_CALIB_CFG_SENSITIVITY_IDX |\ + IWL_CALIB_CFG_AGC_IDX) + +#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\ + IWL_CALIB_CFG_TEMPERATURE_IDX |\ + IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ + IWL_CALIB_CFG_TX_PWR_IDX |\ + IWL_CALIB_CFG_DC_IDX |\ + IWL_CALIB_CFG_TX_IQ_IDX |\ + IWL_CALIB_CFG_SENSITIVITY_IDX) + +/* + * Sets the calibrations trigger values that will be sent to the FW for runtime + * and init calibrations. + * The ones given in the FW TLV are not correct. + */ +static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm) +{ + struct iwl_tlv_calib_ctrl default_calib; + + /* + * WkP FW TLV calib bits are wrong, overwrite them. + * This defines the dynamic calibrations which are implemented in the + * uCode both for init(flow) calculation and event driven calibs. + */ + + /* Init Image */ + default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT); + default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT); + + if (default_calib.event_trigger != + mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger) + IWL_ERR(mvm, + "Updating the event calib for INIT image: 0x%x -> 0x%x\n", + mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger, + default_calib.event_trigger); + if (default_calib.flow_trigger != + mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger) + IWL_ERR(mvm, + "Updating the flow calib for INIT image: 0x%x -> 0x%x\n", + mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger, + default_calib.flow_trigger); + + memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT], + &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); + IWL_ERR(mvm, + "Setting uCode init calibrations event 0x%x, trigger 0x%x\n", + default_calib.event_trigger, + default_calib.flow_trigger); + + /* Run time image */ + default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN); + default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN); + + if (default_calib.event_trigger != + mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger) + IWL_ERR(mvm, + "Updating the event calib for RT image: 0x%x -> 0x%x\n", + mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger, + default_calib.event_trigger); + if (default_calib.flow_trigger != + mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger) + IWL_ERR(mvm, + "Updating the flow calib for RT image: 0x%x -> 0x%x\n", + mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger, + default_calib.flow_trigger); + + memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR], + &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); + IWL_ERR(mvm, + "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n", + default_calib.event_trigger, + default_calib.flow_trigger); +} + +static int iwl_set_default_calibrations(struct iwl_mvm *mvm) +{ + u8 cmd_raw[16]; /* holds the variable size commands */ + struct iwl_set_calib_default_cmd *cmd = + (struct iwl_set_calib_default_cmd *)cmd_raw; + int ret, i; + + /* Setting default values for calibrations we don't run */ + for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) { + u16 cmd_len; + + if (wkp_calib_default_data[i].size == 0) + continue; + + memset(cmd_raw, 0, sizeof(cmd_raw)); + cmd_len = wkp_calib_default_data[i].size + sizeof(cmd); + cmd->calib_index = cpu_to_le16(i); + cmd->length = cpu_to_le16(wkp_calib_default_data[i].size); + if (WARN_ONCE(cmd_len > 
sizeof(cmd_raw), + "Need to enlarge cmd_raw to %d\n", cmd_len)) + break; + memcpy(cmd->data, wkp_calib_default_data[i].data, + wkp_calib_default_data[i].size); + ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0, + sizeof(*cmd) + + wkp_calib_default_data[i].size, + cmd); + if (ret) + return ret; + } + + return 0; +} + +int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +{ + struct iwl_notification_wait calib_wait; + static const u8 init_complete[] = { + INIT_COMPLETE_NOTIF, + CALIB_RES_NOTIF_PHY_DB + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (mvm->init_ucode_run) + return 0; + + iwl_init_notification_wait(&mvm->notif_wait, + &calib_wait, + init_complete, + ARRAY_SIZE(init_complete), + iwl_wait_phy_db_entry, + mvm->phy_db); + + /* Will also start the device */ + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); + if (ret) { + IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); + goto error; + } + + if (read_nvm) { + /* Read nvm */ + ret = iwl_nvm_init(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); + goto error; + } + } + + ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); + WARN_ON(ret); + + /* Override the calibrations from TLV and the const of fw */ + iwl_set_default_calib_trigger(mvm); + + /* WkP doesn't have all calibrations, need to set default values */ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + ret = iwl_set_default_calibrations(mvm); + if (ret) + goto error; + } + + /* + * Send phy configurations command to init uCode + * to start the 16.0 uCode init image internal calibrations. + */ + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", + ret); + goto error; + } + + /* + * Some things may run in the background now, but we + * just wait for the calibration complete notification. 
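+	 *
+	 * This is the same three-step pattern used for the ALIVE wait above:
+	 * iwl_init_notification_wait() registers the waiter, the triggering
+	 * action is performed, and then iwl_wait_notification() blocks with a
+	 * timeout, while iwl_remove_notification() cleans up if the trigger
+	 * itself failed.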
+ */ + ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, + MVM_UCODE_CALIB_TIMEOUT); + if (!ret) + mvm->init_ucode_run = true; + goto out; + +error: + iwl_remove_notification(&mvm->notif_wait, &calib_wait); +out: + if (!iwlmvm_mod_params.init_dbg) { + iwl_trans_stop_device(mvm->trans); + } else if (!mvm->nvm_data) { + /* we want to debug INIT and we have no NVM - fake */ + mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + + sizeof(struct ieee80211_channel) + + sizeof(struct ieee80211_rate), + GFP_KERNEL); + if (!mvm->nvm_data) + return -ENOMEM; + mvm->nvm_data->valid_rx_ant = 1; + mvm->nvm_data->valid_tx_ant = 1; + mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; + mvm->nvm_data->bands[0].n_channels = 1; + mvm->nvm_data->bands[0].n_bitrates = 1; + mvm->nvm_data->bands[0].bitrates = + (void *)mvm->nvm_data->channels + 1; + mvm->nvm_data->bands[0].bitrates->hw_value = 10; + } + + return ret; +} + +#define UCODE_CALIB_TIMEOUT (2*HZ) + +int iwl_mvm_up(struct iwl_mvm *mvm) +{ + int ret, i; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; + + /* If we were in RFKILL during module loading, load init ucode now */ + if (!mvm->init_ucode_run) { + ret = iwl_run_init_mvm_ucode(mvm, false); + if (ret && !iwlmvm_mod_params.init_dbg) { + IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + goto error; + } + } + + if (iwlmvm_mod_params.init_dbg) + return 0; + + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); + if (ret) + goto error; + + /* Send phy db control command and then phy db calibration*/ + ret = iwl_send_phy_db_data(mvm->phy_db); + if (ret) + goto error; + + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; + + /* init the fw <-> mac80211 STA mapping */ + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + + /* Add auxiliary station for scanning */ + ret = iwl_mvm_add_aux_sta(mvm); + if (ret) + goto error; + + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); + + return 0; + error: + iwl_trans_stop_device(mvm->trans); + return ret; +} + +int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) +{ + int ret, i; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; + + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); + if (ret) { + IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); + goto error; + } + + ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); + if (ret) + goto error; + + /* Send phy db control command and then phy db calibration*/ + ret = iwl_send_phy_db_data(mvm->phy_db); + if (ret) + goto error; + + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; + + /* init the fw <-> mac80211 STA mapping */ + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + + /* Add auxiliary station for scanning */ + ret = iwl_mvm_add_aux_sta(mvm); + if (ret) + goto error; + + return 0; + error: + iwl_trans_stop_device(mvm->trans); + return ret; +} + +int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; + u32 flags = le32_to_cpu(card_state_notif->flags); + + IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n", + 
(flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_KILL_CARD_DISABLED) ? + "Reached" : "Not reached"); + + return 0; +} + +int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_radio_version_notif *radio_version = (void *)pkt->data; + + /* TODO: what to do with that? */ + IWL_DEBUG_INFO(mvm, + "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n", + le32_to_cpu(radio_version->radio_flavor), + le32_to_cpu(radio_version->radio_step), + le32_to_cpu(radio_version->radio_dash)); + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c new file mode 100644 index 000000000000..011906e73a05 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/led.c @@ -0,0 +1,134 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/leds.h> +#include "iwl-io.h" +#include "iwl-csr.h" +#include "mvm.h" + +/* Set led register on */ +static void iwl_mvm_led_enable(struct iwl_mvm *mvm) +{ + iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); +} + +/* Set led register off */ +static void iwl_mvm_led_disable(struct iwl_mvm *mvm) +{ + iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF); +} + +static void iwl_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led); + if (brightness > 0) + iwl_mvm_led_enable(mvm); + else + iwl_mvm_led_disable(mvm); +} + +int iwl_mvm_leds_init(struct iwl_mvm *mvm) +{ + int mode = iwlwifi_mod_params.led_mode; + int ret; + + switch (mode) { + case IWL_LED_DEFAULT: + case IWL_LED_RF_STATE: + mode = IWL_LED_RF_STATE; + break; + case IWL_LED_DISABLE: + IWL_INFO(mvm, "Led disabled\n"); + return 0; + default: + return -EINVAL; + }; + + mvm->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(mvm->hw->wiphy)); + mvm->led.brightness_set = iwl_led_brightness_set; + mvm->led.max_brightness = 1; + + if (mode == IWL_LED_RF_STATE) + mvm->led.default_trigger = + ieee80211_get_radio_led_name(mvm->hw); + + ret = led_classdev_register(mvm->trans->dev, &mvm->led); + if (ret) { + kfree(mvm->led.name); + IWL_INFO(mvm, "Failed to enable led\n"); + return ret; + } + + return 0; +} + +void iwl_mvm_leds_exit(struct iwl_mvm *mvm) +{ + if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE) + return; + + led_classdev_unregister(&mvm->led); + kfree(mvm->led.name); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c new file mode 100644 index 000000000000..341dbc0237ea --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -0,0 +1,992 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include <linux/etherdevice.h> +#include <net/mac80211.h> +#include "iwl-io.h" +#include "iwl-prph.h" +#include "fw-api.h" +#include "mvm.h" + +const u8 iwl_mvm_ac_to_tx_fifo[] = { + IWL_MVM_TX_FIFO_BK, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_VO, +}; + +struct iwl_mvm_mac_iface_iterator_data { + struct iwl_mvm *mvm; + struct ieee80211_vif *vif; + unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; + unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; + unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)]; + enum iwl_tsf_id preferred_tsf; + bool found_vif; +}; + +static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_mac_iface_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 ac; + + /* Iterator may already find the interface being added -- skip it */ + if (vif == data->vif) { + data->found_vif = true; + return; + } + + /* Mark the queues used by the vif */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) + __set_bit(vif->hw_queue[ac], data->used_hw_queues); + + if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) + __set_bit(vif->cab_queue, data->used_hw_queues); + + /* + * Mark MAC IDs as used by clearing the available bit, and + * (below) mark TSFs as used if their existing use is not + * compatible with the new interface type. + * No locking or atomic bit operations are needed since the + * data is on the stack of the caller function. + */ + __clear_bit(mvmvif->id, data->available_mac_ids); + + /* + * The TSF is a hardware/firmware resource, there are 4 and + * the driver should assign and free them as needed. However, + * there are cases where 2 MACs should share the same TSF ID + * for the purpose of clock sync, an optimization to avoid + * clock drift causing overlapping TBTTs/DTIMs for a GO and + * client in the system. + * + * The firmware will decide according to the MAC type which + * will be the master and slave. Clients that need to sync + * with a remote station will be the master, and an AP or GO + * will be the slave. + * + * Depending on the new interface type it can be slaved to + * or become the master of an existing interface. + */ + switch (data->vif->type) { + case NL80211_IFTYPE_STATION: + /* + * The new interface is client, so if the existing one + * we're iterating is an AP, the TSF should be used to + * avoid drift between the new client and existing AP, + * the existing AP will get drift updates from the new + * client context in this case + */ + if (vif->type == NL80211_IFTYPE_AP) { + if (data->preferred_tsf == NUM_TSF_IDS && + test_bit(mvmvif->tsf_id, data->available_tsf_ids)) + data->preferred_tsf = mvmvif->tsf_id; + return; + } + break; + case NL80211_IFTYPE_AP: + /* + * The new interface is AP/GO, so should get drift + * updates from an existing client or use the same + * TSF as an existing GO. There's no drift between + * TSFs internally but if they used different TSFs + * then a new client MAC could update one of them + * and cause drift that way. 
+ */ + if (vif->type == NL80211_IFTYPE_STATION || + vif->type == NL80211_IFTYPE_AP) { + if (data->preferred_tsf == NUM_TSF_IDS && + test_bit(mvmvif->tsf_id, data->available_tsf_ids)) + data->preferred_tsf = mvmvif->tsf_id; + return; + } + break; + default: + /* + * For all other interface types there's no need to + * take drift into account. Either they're exclusive + * like IBSS and monitor, or we don't care much about + * their TSF (like P2P Device), but we won't be able + * to share the TSF resource. + */ + break; + } + + /* + * Unless we exited above, we can't share the TSF resource + * that the virtual interface we're iterating over is using + * with the new one, so clear the available bit and if this + * was the preferred one, reset that as well. + */ + __clear_bit(mvmvif->tsf_id, data->available_tsf_ids); + + if (data->preferred_tsf == mvmvif->tsf_id) + data->preferred_tsf = NUM_TSF_IDS; +} + +/* + * Get the mask of the queus used by the vif + */ +u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 qmask, ac; + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + return BIT(IWL_OFFCHANNEL_QUEUE); + + qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ? + BIT(vif->cab_queue) : 0; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) + qmask |= BIT(vif->hw_queue[ac]); + + return qmask; +} + +static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_mac_iface_iterator_data data = { + .mvm = mvm, + .vif = vif, + .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 }, + .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, + /* no preference yet */ + .preferred_tsf = NUM_TSF_IDS, + .used_hw_queues = { + BIT(IWL_MVM_OFFCHANNEL_QUEUE) | + BIT(IWL_MVM_AUX_QUEUE) | + BIT(IWL_MVM_CMD_QUEUE) + }, + .found_vif = false, + }; + u32 ac; + int ret; + + /* + * Allocate a MAC ID and a TSF for this MAC, along with the queues + * and other resources. + */ + + /* + * Before the iterator, we start with all MAC IDs and TSFs available. + * + * During iteration, all MAC IDs are cleared that are in use by other + * virtual interfaces, and all TSF IDs are cleared that can't be used + * by this new virtual interface because they're used by an interface + * that can't share it with the new one. + * At the same time, we check if there's a preferred TSF in the case + * that we should share it with another interface. + */ + + /* Currently, MAC ID 0 should be used only for the managed vif */ + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + __clear_bit(0, data.available_mac_ids); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_mac_iface_iterator, &data); + + /* + * In the case we're getting here during resume, it's similar to + * firmware restart, and with RESUME_ALL the iterator will find + * the vif being added already. + * We don't want to reassign any IDs in either case since doing + * so would probably assign different IDs (as interfaces aren't + * necessarily added in the same order), but the old IDs were + * preserved anyway, so skip ID assignment for both resume and + * recovery. 
+ */ + if (data.found_vif) + return 0; + + /* Therefore, in recovery, we can't get here */ + WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); + + mvmvif->id = find_first_bit(data.available_mac_ids, + NUM_MAC_INDEX_DRIVER); + if (mvmvif->id == NUM_MAC_INDEX_DRIVER) { + IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n"); + ret = -EIO; + goto exit_fail; + } + + if (data.preferred_tsf != NUM_TSF_IDS) + mvmvif->tsf_id = data.preferred_tsf; + else + mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, + NUM_TSF_IDS); + if (mvmvif->tsf_id == NUM_TSF_IDS) { + IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n"); + ret = -EIO; + goto exit_fail; + } + + mvmvif->color = 0; + + INIT_LIST_HEAD(&mvmvif->time_event_data.list); + mvmvif->time_event_data.id = TE_MAX; + + /* No need to allocate data queues to P2P Device MAC.*/ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE; + + return 0; + } + + /* Find available queues, and allocate them to the ACs */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + u8 queue = find_first_zero_bit(data.used_hw_queues, + IWL_MVM_FIRST_AGG_QUEUE); + + if (queue >= IWL_MVM_FIRST_AGG_QUEUE) { + IWL_ERR(mvm, "Failed to allocate queue\n"); + ret = -EIO; + goto exit_fail; + } + + __set_bit(queue, data.used_hw_queues); + vif->hw_queue[ac] = queue; + } + + /* Allocate the CAB queue for softAP and GO interfaces */ + if (vif->type == NL80211_IFTYPE_AP) { + u8 queue = find_first_zero_bit(data.used_hw_queues, + IWL_MVM_FIRST_AGG_QUEUE); + + if (queue >= IWL_MVM_FIRST_AGG_QUEUE) { + IWL_ERR(mvm, "Failed to allocate cab queue\n"); + ret = -EIO; + goto exit_fail; + } + + vif->cab_queue = queue; + } else { + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + } + + mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + + return 0; + +exit_fail: + memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); + memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue)); + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + return ret; +} + +int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + u32 ac; + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif); + if (ret) + return ret; + + switch (vif->type) { + case NL80211_IFTYPE_P2P_DEVICE: + iwl_trans_ac_txq_enable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_TX_FIFO_VO); + break; + case NL80211_IFTYPE_AP: + iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue, + IWL_MVM_TX_FIFO_VO); + /* fall through */ + default: + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + iwl_trans_ac_txq_enable(mvm->trans, vif->hw_queue[ac], + iwl_mvm_ac_to_tx_fifo[ac]); + break; + } + + return 0; +} + +void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + int ac; + + lockdep_assert_held(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_P2P_DEVICE: + iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE); + break; + case NL80211_IFTYPE_AP: + iwl_trans_txq_disable(mvm->trans, vif->cab_queue); + /* fall through */ + default: + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac]); + } +} + +static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum ieee80211_band band, + u8 *cck_rates, u8 *ofdm_rates) +{ + struct ieee80211_supported_band *sband; + unsigned long basic = vif->bss_conf.basic_rates; + int lowest_present_ofdm = 100; + 
int lowest_present_cck = 100; + u8 cck = 0; + u8 ofdm = 0; + int i; + + sband = mvm->hw->wiphy->bands[band]; + + for_each_set_bit(i, &basic, BITS_PER_LONG) { + int hw = sband->bitrates[i].hw_value; + if (hw >= IWL_FIRST_OFDM_RATE) { + ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); + if (lowest_present_ofdm > hw) + lowest_present_ofdm = hw; + } else { + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + cck |= BIT(hw); + if (lowest_present_cck > hw) + lowest_present_cck = hw; + } + } + + /* + * Now we've got the basic rates as bitmaps in the ofdm and cck + * variables. This isn't sufficient though, as there might not + * be all the right rates in the bitmap. E.g. if the only basic + * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps + * and 6 Mbps because the 802.11-2007 standard says in 9.6: + * + * [...] a STA responding to a received frame shall transmit + * its Control Response frame [...] at the highest rate in the + * BSSBasicRateSet parameter that is less than or equal to the + * rate of the immediately previous frame in the frame exchange + * sequence ([...]) and that is of the same modulation class + * ([...]) as the received frame. If no rate contained in the + * BSSBasicRateSet parameter meets these conditions, then the + * control frame sent in response to a received frame shall be + * transmitted at the highest mandatory rate of the PHY that is + * less than or equal to the rate of the received frame, and + * that is of the same modulation class as the received frame. + * + * As a consequence, we need to add all mandatory rates that are + * lower than all of the basic rates to these bitmaps. + */ + + if (IWL_RATE_24M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; + if (IWL_RATE_12M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; + /* 6M already there or needed so always add */ + ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; + + /* + * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. + * Note, however: + * - if no CCK rates are basic, it must be ERP since there must + * be some basic rates at all, so they're OFDM => ERP PHY + * (or we're in 5 GHz, and the cck bitmap will never be used) + * - if 11M is a basic rate, it must be ERP as well, so add 5.5M + * - if 5.5M is basic, 1M and 2M are mandatory + * - if 2M is basic, 1M is mandatory + * - if 1M is basic, that's the only valid ACK rate. + * As a consequence, it's not as complicated as it sounds, just add + * any lower rates to the ACK rate bitmap. 
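+	 * For example, if the only basic CCK rate is 11M, then 5.5M, 2M and
+	 * 1M all end up set in the ACK rate bitmap below.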
+ */ + if (IWL_RATE_11M_INDEX < lowest_present_cck) + cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_5M_INDEX < lowest_present_cck) + cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_2M_INDEX < lowest_present_cck) + cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; + /* 1M already there or needed so always add */ + cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; + + *cck_rates = cck; + *ofdm_rates = ofdm; +} + +static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_ctx_cmd *cmd, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_chanctx_conf *chanctx; + u8 cck_ack_rates, ofdm_ack_rates; + int i; + + cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd->action = cpu_to_le32(action); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (vif->p2p) + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); + else + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); + break; + case NL80211_IFTYPE_AP: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); + break; + case NL80211_IFTYPE_MONITOR: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); + break; + case NL80211_IFTYPE_P2P_DEVICE: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); + break; + case NL80211_IFTYPE_ADHOC: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); + break; + default: + WARN_ON_ONCE(1); + } + + cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); + + memcpy(cmd->node_addr, vif->addr, ETH_ALEN); + if (vif->bss_conf.bssid) + memcpy(cmd->bssid_addr, vif->bss_conf.bssid, ETH_ALEN); + else + eth_broadcast_addr(cmd->bssid_addr); + + rcu_read_lock(); + chanctx = rcu_dereference(vif->chanctx_conf); + iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band + : IEEE80211_BAND_2GHZ, + &cck_ack_rates, &ofdm_ack_rates); + rcu_read_unlock(); + + cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); + cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); + + cmd->cck_short_preamble = + cpu_to_le32(vif->bss_conf.use_short_preamble ? + MAC_FLG_SHORT_PREAMBLE : 0); + cmd->short_slot = + cpu_to_le32(vif->bss_conf.use_short_slot ? + MAC_FLG_SHORT_SLOT : 0); + + for (i = 0; i < AC_NUM; i++) { + cmd->ac[i].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min); + cmd->ac[i].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max); + cmd->ac[i].aifsn = mvmvif->queue_params[i].aifs; + cmd->ac[i].edca_txop = + cpu_to_le16(mvmvif->queue_params[i].txop * 32); + cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]); + } + + if (vif->bss_conf.qos) + cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); + + if (vif->bss_conf.use_cts_prot) + cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT | + MAC_PROT_FLG_SELF_CTS_EN); + + /* + * I think that we should enable these 2 flags regardless the HT PROT + * fields in the HT IE, but I am not sure. Someone knows whom to ask?... 
+ */ + if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) { + cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); + cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT | + MAC_PROT_FLG_FAT_PROT); + } + + cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); +} + +static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, + struct iwl_mac_ctx_cmd *cmd) +{ + int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC, + sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", + le32_to_cpu(cmd->action), ret); + return ret; +} + +/* + * Fill the specific data for mac context of type station or p2p client + */ +static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_data_sta *ctxt_sta) +{ + /* We need the dtim_period to set the MAC as associated */ + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) { + u32 dtim_offs; + + /* + * The DTIM count counts down, so when it is N that means N + * more beacon intervals happen until the DTIM TBTT. Therefore + * add this to the current time. If that ends up being in the + * future, the firmware will handle it. + * + * Also note that the system_timestamp (which we get here as + * "sync_device_ts") and TSF timestamp aren't at exactly the + * same offset in the frame -- the TSF is at the first symbol + * of the TSF, the system timestamp is at signal acquisition + * time. This means there's an offset between them of at most + * a few hundred microseconds (24 * 8 bits + PLCP time gives + * 384us in the longest case), this is currently not relevant + * as the firmware wakes up around 2ms before the TBTT. + */ + dtim_offs = vif->bss_conf.sync_dtim_count * + vif->bss_conf.beacon_int; + /* convert TU to usecs */ + dtim_offs *= 1024; + + ctxt_sta->dtim_tsf = + cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs); + ctxt_sta->dtim_time = + cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs); + + IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", + le64_to_cpu(ctxt_sta->dtim_tsf), + le32_to_cpu(ctxt_sta->dtim_time), + dtim_offs); + + ctxt_sta->is_assoc = cpu_to_le32(1); + } else { + ctxt_sta->is_assoc = cpu_to_le32(0); + } + + ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); + ctxt_sta->bi_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); + ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period); + ctxt_sta->dtim_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period)); + + ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); + ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); +} + +static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p); + + /* Fill the common data for all mac context types */ + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + /* Fill the data specific for station mode */ + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p); + + /* Fill the common data for all mac context types */ + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + /* Fill the data specific for station mode */ + 
iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); + + cmd.p2p_sta.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); + + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + /* No other data to be filled */ + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +struct iwl_mvm_go_iterator_data { + bool go_active; +}; + +static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + struct iwl_mvm_go_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active) + data->go_active = true; +} + +static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + struct iwl_mvm_go_iterator_data data = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); + + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROMISC); + + /* + * This flag should be set to true when the P2P Device is + * discoverable and there is at least another active P2P GO. Settings + * this flag will allow the P2P Device to be discoverable on other + * channels in addition to its listen channel. + * Note that this flag should not be set in other cases as it opens the + * Rx filters on all MAC and increases the number of interrupts. + */ + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_go_iterator, &data); + + cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0); + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, + struct iwl_mac_beacon_cmd *beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u32 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* The index is relative to frame start but we start looking at the + * variable-length part of the beacon. */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + beacon_cmd->tim_idx = cpu_to_le32(tim_idx); + beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]); + } else { + IWL_WARN(mvm, "Unable to find TIM Element in beacon\n"); + } +} + +static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_host_cmd cmd = { + .id = BEACON_TEMPLATE_CMD, + .flags = CMD_ASYNC, + }; + struct iwl_mac_beacon_cmd beacon_cmd = {}; + struct ieee80211_tx_info *info; + u32 beacon_skb_len; + u32 rate; + + if (WARN_ON(!beacon)) + return -EINVAL; + + beacon_skb_len = beacon->len; + + /* TODO: for now the beacon template id is set to be the mac context id. + * Might be better to handle it as another resource ... 
*/ + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + /* Set up TX command fields */ + beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len); + beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id; + beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + beacon_cmd.tx.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | + TX_CMD_FLG_BT_DIS | + TX_CMD_FLG_TSF); + + mvm->mgmt_last_antenna_idx = + iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant, + mvm->mgmt_last_antenna_idx); + + beacon_cmd.tx.rate_n_flags = + cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << + RATE_MCS_ANT_POS); + + info = IEEE80211_SKB_CB(beacon); + + if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) { + rate = IWL_FIRST_OFDM_RATE; + } else { + rate = IWL_FIRST_CCK_RATE; + beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); + } + beacon_cmd.tx.rate_n_flags |= + cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); + + /* Set up TX beacon command fields */ + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd, + beacon->data, + beacon_skb_len); + + /* Submit command */ + cmd.len[0] = sizeof(beacon_cmd); + cmd.data[0] = &beacon_cmd; + cmd.dataflags[0] = 0; + cmd.len[1] = beacon_skb_len; + cmd.data[1] = beacon->data; + cmd.dataflags[1] = IWL_HCMD_DFL_DUP; + + return iwl_mvm_send_cmd(mvm, &cmd); +} + +/* The beacon template for the AP/GO context has changed and needs update */ +int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct sk_buff *beacon; + int ret; + + WARN_ON(vif->type != NL80211_IFTYPE_AP); + + beacon = ieee80211_beacon_get(mvm->hw, vif); + if (!beacon) + return -ENOMEM; + + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); + dev_kfree_skb(beacon); + return ret; +} + +/* + * Fill the specific data for mac context of type AP of P2P GO + */ +static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_data_ap *ctxt_ap) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 curr_dev_time; + + ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); + ctxt_ap->bi_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); + ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period); + ctxt_ap->dtim_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period)); + + ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); + curr_dev_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); + ctxt_ap->beacon_time = cpu_to_le32(curr_dev_time); + + ctxt_ap->beacon_tsf = cpu_to_le64(curr_dev_time); + + /* TODO: Assume that the beacon id == mac context id */ + ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); +} + +static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); + + /* Fill the common data for all mac context types */ + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + /* Fill the data specific for ap mode */ + iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); + + /* Fill the common data for all mac context types */ + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + /* Fill the data specific for GO mode */ + 
iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap); + + cmd.go.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow); + cmd.go.opp_ps_enabled = cpu_to_le32(!!vif->bss_conf.p2p_oppps); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u32 action) +{ + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (!vif->p2p) + return iwl_mvm_mac_ctxt_cmd_station(mvm, vif, + action); + else + return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif, + action); + break; + case NL80211_IFTYPE_AP: + if (!vif->p2p) + return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action); + else + return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action); + break; + case NL80211_IFTYPE_MONITOR: + return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action); + case NL80211_IFTYPE_P2P_DEVICE: + return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action); + default: + break; + } + + return -EOPNOTSUPP; +} + +int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD); + if (ret) + return ret; + + mvmvif->uploaded = true; + return 0; +} + +int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY); +} + +int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_ctx_cmd cmd; + int ret; + + if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); + + ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC, + sizeof(cmd), &cmd); + if (ret) { + IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); + return ret; + } + + mvmvif->uploaded = false; + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c new file mode 100644 index 000000000000..e8264e11b12d --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -0,0 +1,1314 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <net/mac80211.h> + +#include "iwl-op-mode.h" +#include "iwl-io.h" +#include "mvm.h" +#include "sta.h" +#include "time-event.h" +#include "iwl-eeprom-parse.h" +#include "fw-api-scan.h" +#include "iwl-phy-db.h" + +static const struct ieee80211_iface_limit iwl_mvm_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + +static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { + { + .num_different_channels = 1, + .max_interfaces = 3, + .limits = iwl_mvm_limits, + .n_limits = ARRAY_SIZE(iwl_mvm_limits), + }, +}; + +int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) +{ + struct ieee80211_hw *hw = mvm->hw; + int num_mac, ret; + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_QUEUE_CONTROL | + IEEE80211_HW_WANT_MONITOR_VIF | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_TIMING_BEACON_ONLY; + + hw->queues = IWL_FIRST_AMPDU_QUEUE; + hw->offchannel_tx_hw_queue = IWL_OFFCHANNEL_QUEUE; + hw->rate_control_algorithm = "iwl-mvm-rs"; + + /* + * Enable 11w if advertised by firmware and software crypto + * is not enabled (as the firmware will interpret some mgmt + * packets, so enabling it with software crypto isn't safe) + */ + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && + !iwlwifi_mod_params.sw_crypto) + hw->flags |= IEEE80211_HW_MFP_CAPABLE; + + hw->sta_data_size = sizeof(struct iwl_mvm_sta); + hw->vif_data_size = sizeof(struct iwl_mvm_vif); + hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt); + + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_DEVICE); + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; + + hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwl_mvm_iface_combinations); + + hw->wiphy->max_remain_on_channel_duration = 500; + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + /* Extract MAC address */ + memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); + hw->wiphy->addresses = mvm->addresses; + hw->wiphy->n_addresses = 1; + num_mac = mvm->nvm_data->n_hw_addrs; + if (num_mac > 1) { + memcpy(mvm->addresses[1].addr, mvm->addresses[0].addr, + ETH_ALEN); + mvm->addresses[1].addr[5]++; + hw->wiphy->n_addresses++; + } + + /* we create the 802.11 header and a max-length SSID element */ + hw->wiphy->max_scan_ie_len = + mvm->fw->ucode_capa.max_probe_length - 24 - 34; + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + + if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + + hw->wiphy->hw_version = mvm->trans->hw_id; + + if 
(iwlwifi_mod_params.power_save) + hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | + NL80211_FEATURE_P2P_GO_OPPPS; + + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; + +#ifdef CONFIG_PM_SLEEP + if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + mvm->trans->ops->d3_suspend && + mvm->trans->ops->d3_resume && + device_can_wakeup(mvm->trans->dev)) { + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; + if (!iwlwifi_mod_params.sw_crypto) + hw->wiphy->wowlan.flags |= + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; + + hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; + hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; + hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; + } +#endif + + ret = iwl_mvm_leds_init(mvm); + if (ret) + return ret; + + return ieee80211_register_hw(mvm->hw); +} + +static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) { + IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n"); + goto drop; + } + + if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_OFFCHANNEL_QUEUE && + !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) + goto drop; + + if (control->sta) { + if (iwl_mvm_tx_skb(mvm, skb, control->sta)) + goto drop; + return; + } + + if (iwl_mvm_tx_skb_non_sta(mvm, skb)) + goto drop; + return; + drop: + ieee80211_free_txskb(hw, skb); +} + +static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, + u16 *ssn, u8 buf_size) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", + sta->addr, tid, action); + + if (!(mvm->nvm_data->sku_cap_11n_enable)) + return -EACCES; + + mutex_lock(&mvm->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { + ret = -EINVAL; + break; + } + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true); + break; + case IEEE80211_AMPDU_RX_STOP: + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); + break; + case IEEE80211_AMPDU_TX_START: + ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size); + break; + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + break; + } + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->uploaded = false; + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + + /* does this make sense at all? 
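+ * (presumably yes: the firmware addresses a MAC context by its
+ * id/color pair, e.g. FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)
+ * in iwl_mvm_mac_ctxt_remove(), and iwl_mvm_vif documents @color as
+ * "to solve races upon MAC addition and removal"; bumping it here
+ * keeps commands built before the restart from matching the MAC once
+ * it is re-added)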
*/ + mvmvif->color++; + + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); + spin_unlock_bh(&mvm->time_event_lock); + + if (vif->type != NL80211_IFTYPE_P2P_DEVICE) + mvmvif->phy_ctxt = NULL; +} + +static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) +{ + iwl_trans_stop_device(mvm->trans); + iwl_trans_stop_hw(mvm->trans, false); + + mvm->scan_status = IWL_MVM_SCAN_NONE; + + /* just in case one was running */ + ieee80211_remain_on_channel_expired(mvm->hw); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_cleanup_iterator, mvm); + + memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); + memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); + + ieee80211_wake_queues(mvm->hw); + + mvm->vif_count = 0; +} + +static int iwl_mvm_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + + /* Clean up some internal and mac80211 state on restart */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_restart_cleanup(mvm); + + ret = iwl_mvm_up(mvm); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + + clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + ret = iwl_mvm_update_quotas(mvm, NULL); + if (ret) + IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", + ret); + + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + flush_work(&mvm->async_handlers_wk); + + mutex_lock(&mvm->mutex); + /* async_handlers_wk is now blocked */ + + /* + * The work item could be running or queued if the + * ROC time event stops just as we get here. + */ + cancel_work_sync(&mvm->roc_done_wk); + + iwl_trans_stop_device(mvm->trans); + iwl_trans_stop_hw(mvm->trans, false); + + iwl_mvm_async_handlers_purge(mvm); + /* async_handlers_list is empty and will stay empty: HW is stopped */ + + /* the fw is stopped, the aux sta is dead: clean up driver state */ + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + + mutex_unlock(&mvm->mutex); + + /* + * The worker might have been waiting for the mutex, let it run and + * discover that its list is now empty. + */ + cancel_work_sync(&mvm->async_handlers_wk); +} + +static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = data; + int ret; + + ret = iwl_mvm_power_disable(mvm, vif); + if (ret) + IWL_ERR(mvm, "failed to disable power management\n"); +} + +static void iwl_mvm_power_update_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = data; + + iwl_mvm_power_update_mode(mvm, vif); +} + +static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + /* + * Not much to do here. The stack will not allow interface + * types or combinations that we didn't advertise, so we + * don't really have to check the types. 
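+ * (The advertised set is whatever iwl_mvm_mac_setup_register() put
+ * into hw->wiphy->interface_modes and hw->wiphy->iface_combinations,
+ * so an unsupported type never reaches this callback.)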
+ */
+
+ mutex_lock(&mvm->mutex);
+
+ /* Allocate resources for the MAC context, and add it to the fw */
+ ret = iwl_mvm_mac_ctxt_init(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * The AP binding flow can be done only after the beacon
+ * template is configured (which happens only in the mac80211
+ * start_ap() flow), and adding the broadcast station can happen
+ * only after the binding.
+ * In addition, since modifying the MAC before adding a bcast
+ * station is not allowed by the FW, delay the adding of MAC context to
+ * the point where we can also add the bcast station.
+ * In short: there's not much we can do at this point, other than
+ * allocating resources :)
+ */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
+ qmask);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to allocate bcast sta\n");
+ goto out_release;
+ }
+
+ goto out_unlock;
+ }
+
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single MAC.
+ * Iterate and disable PM on all active interfaces.
+ * Note: the method below does not count the new interface being added
+ * at this moment.
+ */
+ mvm->vif_count++;
+ if (mvm->vif_count > 1) {
+ IWL_DEBUG_MAC80211(mvm,
+ "Disable power on existing interfaces\n");
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_pm_disable_iterator, mvm);
+ }
+
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_release;
+
+ /*
+ * Update power state on the new interface. Admittedly, based on
+ * mac80211 logic this power update will disable power management
+ */
+ iwl_mvm_power_update_mode(mvm, vif);
+
+ /*
+ * P2P_DEVICE interface does not have a channel context assigned to it,
+ * so a dedicated PHY context is allocated to it and the corresponding
+ * MAC context is bound to it at this stage.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ mvmvif->phy_ctxt = &mvm->phy_ctxt_roc;
+
+ /*
+ * The channel used here isn't relevant as it's
+ * going to be overwritten as part of the ROC flow.
+ * For now use the first channel we have.
+ */
+ chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt,
+ &chandef, 1, 1);
+ if (ret)
+ goto out_remove_mac;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_remove_phy;
+
+ ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
+ if (ret)
+ goto out_unbind;
+
+ /* Save a pointer to p2p device vif, so it can later be used to
+ * update the p2p device MAC when a GO is started/stopped */
+ mvm->p2p_device_vif = vif;
+ }
+
+ goto out_unlock;
+
+ out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ out_remove_phy:
+ iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ out_remove_mac:
+ mvmvif->phy_ctxt = NULL;
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+ out_release:
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single MAC.
+ * Check if only one additional interface remains after releasing
+ * current one. Update power mode on the remaining interface.
+ */ + mvm->vif_count--; + IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", + mvm->vif_count); + if (mvm->vif_count == 1) { + ieee80211_iterate_active_interfaces( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_update_iterator, mvm); + } + iwl_mvm_mac_ctxt_release(mvm, vif); + out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 tfd_msk = 0, ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) + tfd_msk |= BIT(vif->hw_queue[ac]); + + if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) + tfd_msk |= BIT(vif->cab_queue); + + if (tfd_msk) { + mutex_lock(&mvm->mutex); + iwl_mvm_flush_tx_path(mvm, tfd_msk, true); + mutex_unlock(&mvm->mutex); + } + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + /* + * Flush the ROC worker which will flush the OFFCHANNEL queue. + * We assume here that all the packets sent to the OFFCHANNEL + * queue are sent in ROC session. + */ + flush_work(&mvm->roc_done_wk); + } else { + /* + * By now, all the AC queues are empty. The AGG queues are + * empty too. We already got all the Tx responses for all the + * packets in the queues. The drain work can have been + * triggered. Flush it. This work item takes the mutex, so kill + * it before we take it. + */ + flush_work(&mvm->sta_drained_wk); + } + + mutex_lock(&mvm->mutex); + + /* + * For AP/GO interface, the tear down of the resources allocated to the + * interface should be handled as part of the bss_info_changed flow. + */ + if (vif->type == NL80211_IFTYPE_AP) { + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); + goto out_release; + } + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvm->p2p_device_vif = NULL; + iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_binding_remove_vif(mvm, vif); + iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt); + mvmvif->phy_ctxt = NULL; + } + + /* + * TODO: remove this temporary code. + * Currently MVM FW supports power management only on single MAC. + * Check if only one additional interface remains after removing + * current one. Update power mode on the remaining interface. 
+ */ + if (mvm->vif_count) + mvm->vif_count--; + IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", + mvm->vif_count); + if (mvm->vif_count == 1) { + ieee80211_iterate_active_interfaces( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_update_iterator, mvm); + } + + iwl_mvm_mac_ctxt_remove(mvm, vif); + +out_release: + iwl_mvm_mac_ctxt_release(mvm, vif); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} + +static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + *total_flags = 0; +} + +static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + ret = iwl_mvm_mac_ctxt_changed(mvm, vif); + if (ret) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + if (changes & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + /* add quota for this interface */ + ret = iwl_mvm_update_quotas(mvm, vif); + if (ret) { + IWL_ERR(mvm, "failed to update quotas\n"); + return; + } + } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + /* remove AP station now that the MAC is unassoc */ + ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); + if (ret) + IWL_ERR(mvm, "failed to remove AP station\n"); + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + /* remove quota for this interface */ + ret = iwl_mvm_update_quotas(mvm, NULL); + if (ret) + IWL_ERR(mvm, "failed to update quotas\n"); + } + } else if (changes & BSS_CHANGED_DTIM_PERIOD) { + /* + * We received a beacon _after_ association so + * remove the session protection. + */ + iwl_mvm_remove_time_event(mvm, mvmvif, + &mvmvif->time_event_data); + } else if (changes & BSS_CHANGED_PS) { + /* + * TODO: remove this temporary code. + * Currently MVM FW supports power management only on single + * MAC. Avoid power mode update if more than one interface + * is active. + */ + IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", + mvm->vif_count); + if (mvm->vif_count == 1) { + ret = iwl_mvm_power_update_mode(mvm, vif); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } + } +} + +static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + mutex_lock(&mvm->mutex); + + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); + if (ret) + goto out_unlock; + + /* Add the mac context */ + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; + + /* Perform the binding */ + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (ret) + goto out_remove; + + mvmvif->ap_active = true; + + /* Send the bcast station. 
At this stage the TBTT and DTIM time events + * are added and applied to the scheduler */ + ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta); + if (ret) + goto out_unbind; + + ret = iwl_mvm_update_quotas(mvm, vif); + if (ret) + goto out_rm_bcast; + + /* Need to update the P2P Device MAC */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); + + mutex_unlock(&mvm->mutex); + return 0; + +out_rm_bcast: + iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); +out_unbind: + iwl_mvm_binding_remove_vif(mvm, vif); +out_remove: + iwl_mvm_mac_ctxt_remove(mvm, vif); +out_unlock: + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mutex_lock(&mvm->mutex); + + mvmvif->ap_active = false; + + /* Need to update the P2P Device MAC */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); + + iwl_mvm_update_quotas(mvm, NULL); + iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_binding_remove_vif(mvm, vif); + iwl_mvm_mac_ctxt_remove(mvm, vif); + + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + /* Need to send a new beacon template to the FW */ + if (changes & BSS_CHANGED_BEACON) { + if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) + IWL_WARN(mvm, "Failed updating beacon data\n"); + } +} + +static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); + break; + case NL80211_IFTYPE_AP: + iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes); + break; + default: + /* shouldn't happen */ + WARN_ON_ONCE(1); + } + + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + if (mvm->scan_status == IWL_MVM_SCAN_NONE) + ret = iwl_mvm_scan_request(mvm, vif, req); + else + ret = -EBUSY; + + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + iwl_mvm_cancel_scan(mvm); + + mutex_unlock(&mvm->mutex); +} + +static void +iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid, + int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* TODO: how do we tell the fw to send frames for a specific TID */ + + /* + * The fw will send EOSP notification when the last frame will be + * transmitted. 
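+ * All that is needed here is to forward num_frames and the release
+ * reason (a PS-Poll vs. a uAPSD trigger frame) to the station update
+ * below; no per-frame bookkeeping is kept in the driver.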
+ */ + iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames); +} + +static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + + switch (cmd) { + case STA_NOTIFY_SLEEP: + if (atomic_read(&mvmsta->pending_frames) > 0) + ieee80211_sta_block_awake(hw, sta, true); + /* + * The fw updates the STA to be asleep. Tx packets on the Tx + * queues to this station will not be transmitted. The fw will + * send a Tx response with TX_STATUS_FAIL_DEST_PS. + */ + break; + case STA_NOTIFY_AWAKE: + if (WARN_ON(mvmsta->sta_id == IWL_INVALID_STATION)) + break; + iwl_mvm_sta_modify_ps_wake(mvm, sta); + break; + default: + break; + } +} + +static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", + sta->addr, old_state, new_state); + + /* this would be a mac80211 bug ... but don't crash */ + if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + return -EINVAL; + + /* if a STA is being removed, reuse its ID */ + flush_work(&mvm->sta_drained_wk); + + mutex_lock(&mvm->mutex); + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + ret = iwl_mvm_add_sta(mvm, vif, sta); + } else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_AUTH) { + ret = 0; + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = iwl_mvm_update_sta(mvm, vif, sta); + if (ret == 0) + iwl_mvm_rs_rate_init(mvm, sta, + mvmvif->phy_ctxt->channel->band); + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTHORIZED) { + ret = 0; + } else if (old_state == IEEE80211_STA_AUTHORIZED && + new_state == IEEE80211_STA_ASSOC) { + ret = 0; + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + ret = 0; + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_NONE) { + ret = 0; + } else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) { + ret = iwl_mvm_rm_sta(mvm, vif, sta); + } else { + ret = -EIO; + } + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mvm->rts_threshold = value; + + return 0; +} + +static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->queue_params[ac] = *params; + + /* + * No need to update right away, we'll get BSS_CHANGED_QOS + * The exception is P2P_DEVICE interface which needs immediate update. 
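+ * (BSS_CHANGED_QOS only arrives through bss_info_changed(), which is
+ * handled here for station and AP interfaces only, and a P2P Device
+ * never associates, so its MAC context has to be refreshed explicitly.)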
+ */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif); + mutex_unlock(&mvm->mutex); + return ret; + } + return 0; +} + +static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS, + 200 + vif->bss_conf.beacon_int); + u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS, + 100 + vif->bss_conf.beacon_int); + + if (WARN_ON_ONCE(vif->bss_conf.assoc)) + return; + + mutex_lock(&mvm->mutex); + /* Try really hard to protect the session and hear a beacon */ + iwl_mvm_protect_session(mvm, vif, duration, min_duration); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + if (iwlwifi_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + /* fall-through */ + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE)); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* + * Support for TX only, at least for now, so accept + * the key and do nothing else. Then mac80211 will + * pass it for TX but we don't have to use it for RX. + */ + return 0; + default: + return -EOPNOTSUPP; + } + + mutex_lock(&mvm->mutex); + + switch (cmd) { + case SET_KEY: + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false); + if (ret) { + IWL_WARN(mvm, "set key failed\n"); + /* + * can't add key for RX, but we don't need it + * in the device for TX so still return 0 + */ + ret = 0; + } + + break; + case DISABLE_KEY: + IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); + ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); +} + + +static int iwl_mvm_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *channel, + int duration) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct cfg80211_chan_def chandef; + int ret; + + if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { + IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type); + return -EINVAL; + } + + IWL_DEBUG_MAC80211(mvm, "enter (%d, %d)\n", channel->hw_value, + duration); + + mutex_lock(&mvm->mutex); + + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); + ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc, + &chandef, 1, 1); + + /* Schedule the time events */ + ret = iwl_mvm_start_p2p_roc(mvm, vif, duration); + + mutex_unlock(&mvm->mutex); + IWL_DEBUG_MAC80211(mvm, "leave\n"); + + return ret; +} + +static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + 
IWL_DEBUG_MAC80211(mvm, "enter\n"); + + mutex_lock(&mvm->mutex); + iwl_mvm_stop_p2p_roc(mvm); + mutex_unlock(&mvm->mutex); + + IWL_DEBUG_MAC80211(mvm, "leave\n"); + return 0; +} + +static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + int ret; + + mutex_lock(&mvm->mutex); + + IWL_DEBUG_MAC80211(mvm, "Add PHY context\n"); + ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + + mutex_lock(&mvm->mutex); + iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt); + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + + mutex_lock(&mvm->mutex); + iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + mutex_lock(&mvm->mutex); + + mvmvif->phy_ctxt = phyctx; + + switch (vif->type) { + case NL80211_IFTYPE_AP: + /* + * The AP binding flow is handled as part of the start_ap flow + * (in bss_info_changed). + */ + ret = 0; + goto out_unlock; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MONITOR: + break; + default: + ret = -EINVAL; + goto out_unlock; + } + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (ret) + goto out_unlock; + + /* + * Setting the quota at this stage is only required for monitor + * interfaces. For the other types, the bss_info changed flow + * will handle quota settings. 
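+ * (A monitor vif never goes through the association or start_ap
+ * paths that otherwise call iwl_mvm_update_quotas(), so this is the
+ * only point where its binding gets a time quota.)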
+ */ + if (vif->type == NL80211_IFTYPE_MONITOR) { + ret = iwl_mvm_update_quotas(mvm, vif); + if (ret) + goto out_remove_binding; + } + + goto out_unlock; + + out_remove_binding: + iwl_mvm_binding_remove_vif(mvm, vif); + out_unlock: + mutex_unlock(&mvm->mutex); + if (ret) + mvmvif->phy_ctxt = NULL; + return ret; +} + +static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mutex_lock(&mvm->mutex); + + iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); + + if (vif->type == NL80211_IFTYPE_AP) + goto out_unlock; + + iwl_mvm_binding_remove_vif(mvm, vif); + switch (vif->type) { + case NL80211_IFTYPE_MONITOR: + iwl_mvm_update_quotas(mvm, vif); + break; + default: + break; + } + +out_unlock: + mvmvif->phy_ctxt = NULL; + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + bool set) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + + if (!mvm_sta || !mvm_sta->vif) { + IWL_ERR(mvm, "Station is not associated to a vif\n"); + return -EINVAL; + } + + return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); +} + +struct ieee80211_ops iwl_mvm_hw_ops = { + .tx = iwl_mvm_mac_tx, + .ampdu_action = iwl_mvm_mac_ampdu_action, + .start = iwl_mvm_mac_start, + .restart_complete = iwl_mvm_mac_restart_complete, + .stop = iwl_mvm_mac_stop, + .add_interface = iwl_mvm_mac_add_interface, + .remove_interface = iwl_mvm_mac_remove_interface, + .config = iwl_mvm_mac_config, + .configure_filter = iwl_mvm_configure_filter, + .bss_info_changed = iwl_mvm_bss_info_changed, + .hw_scan = iwl_mvm_mac_hw_scan, + .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, + .sta_state = iwl_mvm_mac_sta_state, + .sta_notify = iwl_mvm_mac_sta_notify, + .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, + .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, + .conf_tx = iwl_mvm_mac_conf_tx, + .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, + .set_key = iwl_mvm_mac_set_key, + .update_tkip_key = iwl_mvm_mac_update_tkip_key, + .remain_on_channel = iwl_mvm_roc, + .cancel_remain_on_channel = iwl_mvm_cancel_roc, + + .add_chanctx = iwl_mvm_add_chanctx, + .remove_chanctx = iwl_mvm_remove_chanctx, + .change_chanctx = iwl_mvm_change_chanctx, + .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, + .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, + + .start_ap = iwl_mvm_start_ap, + .stop_ap = iwl_mvm_stop_ap, + + .set_tim = iwl_mvm_set_tim, + +#ifdef CONFIG_PM_SLEEP + /* look at d3.c */ + .suspend = iwl_mvm_suspend, + .resume = iwl_mvm_resume, + .set_wakeup = iwl_mvm_set_wakeup, + .set_rekey_data = iwl_mvm_set_rekey_data, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = iwl_mvm_ipv6_addr_change, +#endif + .set_default_unicast_key = iwl_mvm_set_default_unicast_key, +#endif +}; diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h new file mode 100644 index 000000000000..4e339ccfa800 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -0,0 +1,500 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __IWL_MVM_H__ +#define __IWL_MVM_H__ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/leds.h> +#include <linux/in6.h> + +#include "iwl-op-mode.h" +#include "iwl-trans.h" +#include "iwl-notif-wait.h" +#include "iwl-eeprom-parse.h" +#include "iwl-test.h" +#include "iwl-trans.h" +#include "sta.h" +#include "fw-api.h" + +#define IWL_INVALID_MAC80211_QUEUE 0xff +#define IWL_MVM_MAX_ADDRESSES 2 +#define IWL_RSSI_OFFSET 44 + +enum iwl_mvm_tx_fifo { + IWL_MVM_TX_FIFO_BK = 0, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_VO, +}; + +/* Placeholder */ +#define IWL_OFFCHANNEL_QUEUE 8 +#define IWL_FIRST_AMPDU_QUEUE 11 + +extern struct ieee80211_ops iwl_mvm_hw_ops; +/** + * struct iwl_mvm_mod_params - module parameters for iwlmvm + * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. 
+ * We will register to mac80211 to have testmode working. The NIC must not
+ * be up'ed after the INIT fw asserted. This is useful to be able to use
+ * proprietary tools over testmode to debug the INIT fw.
+ * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
+ * Save)-2(default), LP(Low Power)-3
+ */
+struct iwl_mvm_mod_params {
+ bool init_dbg;
+ int power_scheme;
+};
+extern struct iwl_mvm_mod_params iwlmvm_mod_params;
+
+struct iwl_mvm_phy_ctxt {
+ u16 id;
+ u16 color;
+
+ /*
+ * TODO: This should probably be removed. Currently here only for rate
+ * scaling algorithm
+ */
+ struct ieee80211_channel *channel;
+};
+
+struct iwl_mvm_time_event_data {
+ struct ieee80211_vif *vif;
+ struct list_head list;
+ unsigned long end_jiffies;
+ u32 duration;
+ bool running;
+ u32 uid;
+
+ /*
+ * The access to the 'id' field must be done when the
+ * mvm->time_event_lock is held, as its value is used to indicate
+ * if the te is in the time event list or not (when id == TE_MAX)
+ */
+ u32 id;
+};
+
+ /* Power management */
+
+/**
+ * enum iwl_power_scheme
+ * @IWL_POWER_SCHEME_CAM - Continuously Active Mode
+ * @IWL_POWER_SCHEME_BPS - Balanced Power Save (default)
+ * @IWL_POWER_SCHEME_LP - Low Power
+ */
+enum iwl_power_scheme {
+ IWL_POWER_SCHEME_CAM = 1,
+ IWL_POWER_SCHEME_BPS,
+ IWL_POWER_SCHEME_LP
+};
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+
+/**
+ * struct iwl_mvm_vif - data per Virtual Interface; it is a MAC context
+ * @id: between 0 and 3
+ * @color: to solve races upon MAC addition and removal
+ * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
+ * @uploaded: indicates the MAC context has been added to the device
+ * @ap_active: indicates that ap context is configured, and that the interface
+ * should get quota etc.
+ * @queue_params: QoS params for this MAC
+ * @bcast_sta: station used for broadcast packets. Used by the following
+ * vifs: P2P_DEVICE, GO and AP.
+ * @beacon_skb: the skb used to hold the AP/GO beacon template
+ */
+struct iwl_mvm_vif {
+ u16 id;
+ u16 color;
+ u8 ap_sta_id;
+
+ bool uploaded;
+ bool ap_active;
+
+ enum iwl_tsf_id tsf_id;
+
+ /*
+ * QoS data from mac80211, need to store this here
+ * as mac80211 has a separate callback but we need
+ * to have the data for the MAC context
+ */
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct iwl_mvm_time_event_data time_event_data;
+
+ struct iwl_mvm_int_sta bcast_sta;
+
+ /*
+ * Assigned while mac80211 has the interface in a channel context,
+ * or, for P2P Device, while it exists.
+ */
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+
+#ifdef CONFIG_PM_SLEEP
+ /* WoWLAN GTK rekey data */
+ struct {
+ u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+ bool valid;
+ } rekey_data;
+
+ int tx_key_idx;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 addresses for WoWLAN */
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
+ int num_target_ipv6_addrs;
+#endif
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *dbgfs_dir;
+ void *dbgfs_data;
+#endif
+};
+
+static inline struct iwl_mvm_vif *
+iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+ return (void *)vif->drv_priv;
+}
+
+enum iwl_mvm_status {
+ IWL_MVM_STATUS_HW_RFKILL,
+ IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_IN_HW_RESTART,
+};
+
+enum iwl_scan_status {
+ IWL_MVM_SCAN_NONE,
+ IWL_MVM_SCAN_OS,
+};
+
+/**
+ * struct iwl_nvm_section - describes an NVM section in memory.
+ * + * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, + * and saved for later use by the driver. Not all NVM sections are saved + * this way, only the needed ones. + */ +struct iwl_nvm_section { + u16 length; + const u8 *data; +}; + +struct iwl_mvm { + /* for logger access */ + struct device *dev; + + struct iwl_trans *trans; + const struct iwl_fw *fw; + const struct iwl_cfg *cfg; + struct iwl_phy_db *phy_db; + struct ieee80211_hw *hw; + + /* for protecting access to iwl_mvm */ + struct mutex mutex; + struct list_head async_handlers_list; + spinlock_t async_handlers_lock; + struct work_struct async_handlers_wk; + + struct work_struct roc_done_wk; + + unsigned long status; + + enum iwl_ucode_type cur_ucode; + bool ucode_loaded; + bool init_ucode_run; + u32 error_event_table; + u32 log_event_table; + + u32 ampdu_ref; + + struct iwl_notif_wait_data notif_wait; + + unsigned long transport_queue_stop; + u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; + atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; + + struct iwl_nvm_data *nvm_data; + /* eeprom blob for debugfs/testmode */ + u8 *eeprom_blob; + size_t eeprom_blob_size; + /* NVM sections for 7000 family */ + struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS]; + + /* EEPROM MAC addresses */ + struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; + + /* data related to data path */ + struct iwl_rx_phy_info last_phy_info; + struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; + struct work_struct sta_drained_wk; + unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; + + /* configured by mac80211 */ + u32 rts_threshold; + + /* Scan status, cmd (pre-allocated) and auxiliary station */ + enum iwl_scan_status scan_status; + struct iwl_scan_cmd *scan_cmd; + + /* Internal station */ + struct iwl_mvm_int_sta aux_sta; + + u8 scan_last_antenna_idx; /* to toggle TX between antennas */ + u8 mgmt_last_antenna_idx; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; + bool prevent_power_down_d3; +#endif + + struct iwl_mvm_phy_ctxt phy_ctxt_roc; + + struct list_head time_event_list; + spinlock_t time_event_lock; + + /* + * A bitmap indicating the index of the key in use. The firmware + * can hold 16 keys at most. Reflect this fact. + */ + unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; + u8 vif_count; + + struct led_classdev led; + + struct ieee80211_vif *p2p_device_vif; +}; + +/* Extract MVM priv from op_mode and _hw */ +#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \ + ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific) + +#define IWL_MAC80211_GET_MVM(_hw) \ + IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) + +extern const u8 iwl_mvm_ac_to_tx_fifo[]; + +struct iwl_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ +}; + +/****************** + * MVM Methods + ******************/ +/* uCode */ +int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); + +/* Utils */ +int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, + enum ieee80211_band band); +u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); +void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); +u8 first_antenna(u8 mask); +u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); + +/* Tx / Host Commands */ +int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, + struct iwl_host_cmd *cmd); +int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id, + u32 flags, u16 len, const void *data); +int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, + struct iwl_host_cmd *cmd, + u32 *status); +int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, + u16 len, const void *data, + u32 *status); +int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta); +int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); +#ifdef CONFIG_IWLWIFI_DEBUG +const char *iwl_mvm_get_tx_fail_reason(u32 status); +#else +static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } +#endif +int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync); +void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); + +/* Statistics */ +int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); + +/* NVM */ +int iwl_nvm_init(struct iwl_mvm *mvm); + +int iwl_mvm_up(struct iwl_mvm *mvm); +int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); + +int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); + +/* + * FW notifications / CMD responses handlers + * Convention: iwl_mvm_rx_<NAME OF THE CMD> + */ +int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); + +/* MVM PHY */ +int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic); +int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic); +void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt); + +/* MAC (virtual interface) programming */ +int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); + +/* Bindings */ +int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + +/* Quota management */ +int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif); + +/* Scanning */ +int iwl_mvm_scan_request(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); +int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +void iwl_mvm_cancel_scan(struct iwl_mvm *mvm); + +/* MVM debugfs */ +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); +int iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct dentry *dbgfs_dir); +void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_powertable_cmd *cmd); +#else +static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, + struct dentry *dbgfs_dir) +{ + return 0; +} +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + +/* rate scaling */ +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + u8 flags, bool init); + +/* power managment */ +int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + +int iwl_mvm_leds_init(struct iwl_mvm *mvm); +void iwl_mvm_leds_exit(struct iwl_mvm *mvm); + +/* D3 (WoWLAN, NetDetect) */ +int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); +int iwl_mvm_resume(struct ieee80211_hw *hw); +void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); +void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data); +void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev); +void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int idx); + +#endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c new file mode 100644 index 000000000000..20016bcbdeab --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -0,0 +1,311 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "iwl-trans.h" +#include "mvm.h" +#include "iwl-eeprom-parse.h" +#include "iwl-eeprom-read.h" +#include "iwl-nvm-parse.h" + +/* list of NVM sections we are allowed/need to read */ +static const int nvm_to_read[] = { + NVM_SECTION_TYPE_HW, + NVM_SECTION_TYPE_SW, + NVM_SECTION_TYPE_CALIBRATION, + NVM_SECTION_TYPE_PRODUCTION, +}; + +/* used to simplify the shared operations on NCM_ACCESS_CMD versions */ +union iwl_nvm_access_cmd { + struct iwl_nvm_access_cmd_ver1 ver1; + struct iwl_nvm_access_cmd_ver2 ver2; +}; +union iwl_nvm_access_resp { + struct iwl_nvm_access_resp_ver1 ver1; + struct iwl_nvm_access_resp_ver2 ver2; +}; + +static inline void iwl_nvm_fill_read_ver1(struct iwl_nvm_access_cmd_ver1 *cmd, + u16 offset, u16 length) +{ + cmd->offset = cpu_to_le16(offset); + cmd->length = cpu_to_le16(length); + cmd->cache_refresh = 1; +} + +static inline void iwl_nvm_fill_read_ver2(struct iwl_nvm_access_cmd_ver2 *cmd, + u16 offset, u16 length, u16 section) +{ + cmd->offset = cpu_to_le16(offset); + cmd->length = cpu_to_le16(length); + cmd->type = cpu_to_le16(section); +} + +static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, + u16 offset, u16 length, u8 *data) +{ + union iwl_nvm_access_cmd nvm_access_cmd; + union iwl_nvm_access_resp *nvm_resp; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = NVM_ACCESS_CMD, + .flags = CMD_SYNC | CMD_WANT_SKB, + .data = { &nvm_access_cmd, }, + }; + int ret, bytes_read, offset_read; + u8 *resp_data; + + memset(&nvm_access_cmd, 0, sizeof(nvm_access_cmd)); + + /* TODO: not sure family should be the decider, maybe FW version? */ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_nvm_fill_read_ver2(&(nvm_access_cmd.ver2), + offset, length, section); + cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver2); + } else { + iwl_nvm_fill_read_ver1(&(nvm_access_cmd.ver1), + offset, length); + cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver1); + } + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + pkt = cmd.resp_pkt; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n", + pkt->hdr.flags); + ret = -EIO; + goto exit; + } + + /* Extract NVM response */ + nvm_resp = (void *)pkt->data; + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + ret = le16_to_cpu(nvm_resp->ver2.status); + bytes_read = le16_to_cpu(nvm_resp->ver2.length); + offset_read = le16_to_cpu(nvm_resp->ver2.offset); + resp_data = nvm_resp->ver2.data; + } else { + ret = le16_to_cpu(nvm_resp->ver1.length) <= 0; + bytes_read = le16_to_cpu(nvm_resp->ver1.length); + offset_read = le16_to_cpu(nvm_resp->ver1.offset); + resp_data = nvm_resp->ver1.data; + } + if (ret) { + IWL_ERR(mvm, + "NVM access command failed with status %d (device: %s)\n", + ret, mvm->cfg->name); + ret = -EINVAL; + goto exit; + } + + if (offset_read != offset) { + IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n", + offset_read); + ret = -EINVAL; + goto exit; + } + + /* Write data to NVM */ + memcpy(data + offset, resp_data, bytes_read); + ret = bytes_read; + +exit: + iwl_free_resp(&cmd); + return ret; +} + +/* + * Reads an NVM section completely. + * NICs prior to 7000 family doesn't have a real NVM, but just read + * section 0 which is the EEPROM. Because the EEPROM reading is unlimited + * by uCode, we need to manually check in this case that we don't + * overflow and try to read more than the EEPROM size. 
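The loop below repeats the NVM_ACCESS exchange until the firmware returns less than the requested chunk, and for the pre-7000 EEPROM case it additionally caps the total at the EEPROM size. A minimal standalone sketch of that loop shape, with made-up sizes and a fake read_chunk() standing in for the host-command round trip:

#include <stdio.h>
#include <string.h>

#define CHUNK_SZ	256	/* stand-in for the 4K/8K command buffer budget */
#define EEPROM_SZ	1000	/* stand-in for cfg->base_params->eeprom_size */

/* Pretend device: returns up to 'len' bytes of a 700-byte section. */
static int read_chunk(unsigned int offset, unsigned int len, unsigned char *out)
{
	const unsigned int section_size = 700;

	if (offset >= section_size)
		return 0;
	if (len > section_size - offset)
		len = section_size - offset;
	memset(out + offset, 0xab, len);
	return (int)len;
}

/* Read until the device returns a short (or empty) chunk, or the cap is hit. */
static int read_section(unsigned char *buf, int old_eeprom)
{
	unsigned int offset = 0, length = CHUNK_SZ;
	int ret = (int)length;

	if (old_eeprom && length > EEPROM_SZ)
		length = EEPROM_SZ;

	while (ret == (int)length) {
		ret = read_chunk(offset, length, buf);
		if (ret < 0)
			return ret;
		offset += (unsigned int)ret;
		if (old_eeprom && offset == EEPROM_SZ)
			break;
	}
	return (int)offset;	/* total bytes read for this section */
}

int main(void)
{
	unsigned char buf[EEPROM_SZ];

	printf("section length = %d\n", read_section(buf, 0));	/* 700 */
	return 0;
}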
+ * For 7000 family NICs, we supply the maximal size we can read, and + * the uCode fills the response with as much data as we can, + * without overflowing, so no check is needed. + */ +static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, + u8 *data) +{ + u16 length, offset = 0; + int ret; + bool old_eeprom = mvm->cfg->device_family != IWL_DEVICE_FAMILY_7000; + + length = (iwlwifi_mod_params.amsdu_size_8K ? (8 * 1024) : (4 * 1024)) + - sizeof(union iwl_nvm_access_cmd) + - sizeof(struct iwl_rx_packet); + /* + * if length is greater than EEPROM size, truncate it because uCode + * doesn't check it by itself, and exit the loop when reached. + */ + if (old_eeprom && length > mvm->cfg->base_params->eeprom_size) + length = mvm->cfg->base_params->eeprom_size; + ret = length; + + /* Read the NVM until exhausted (reading less than requested) */ + while (ret == length) { + ret = iwl_nvm_read_chunk(mvm, section, offset, length, data); + if (ret < 0) { + IWL_ERR(mvm, + "Cannot read NVM from section %d offset %d, length %d\n", + section, offset, length); + return ret; + } + offset += ret; + if (old_eeprom && offset == mvm->cfg->base_params->eeprom_size) + break; + } + + IWL_INFO(mvm, "NVM section %d read completed\n", section); + return offset; +} + +static struct iwl_nvm_data * +iwl_parse_nvm_sections(struct iwl_mvm *mvm) +{ + struct iwl_nvm_section *sections = mvm->nvm_sections; + const __le16 *hw, *sw, *calib; + + /* Checking for required sections */ + if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || + !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) { + IWL_ERR(mvm, "Can't parse empty NVM sections\n"); + return NULL; + } + + if (WARN_ON(!mvm->cfg)) + return NULL; + + hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data; + sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; + calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; + return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib); +} + +int iwl_nvm_init(struct iwl_mvm *mvm) +{ + int ret, i, section; + u8 *nvm_buffer, *temp; + + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + /* TODO: find correct NVM max size for a section */ + nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, + GFP_KERNEL); + if (!nvm_buffer) + return -ENOMEM; + for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) { + section = nvm_to_read[i]; + /* we override the constness for initial read */ + ret = iwl_nvm_read_section(mvm, section, nvm_buffer); + if (ret < 0) + break; + temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + break; + } + mvm->nvm_sections[section].data = temp; + mvm->nvm_sections[section].length = ret; + } + kfree(nvm_buffer); + if (ret < 0) + return ret; + } else { + /* allocate eeprom */ + mvm->eeprom_blob_size = mvm->cfg->base_params->eeprom_size; + IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM size = %zd\n", + mvm->eeprom_blob_size); + mvm->eeprom_blob = kzalloc(mvm->eeprom_blob_size, GFP_KERNEL); + if (!mvm->eeprom_blob) + return -ENOMEM; + + ret = iwl_nvm_read_section(mvm, 0, mvm->eeprom_blob); + if (ret != mvm->eeprom_blob_size) { + IWL_ERR(mvm, "Read partial NVM %d/%zd\n", + ret, mvm->eeprom_blob_size); + kfree(mvm->eeprom_blob); + mvm->eeprom_blob = NULL; + return -EINVAL; + } + } + + ret = 0; + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) + mvm->nvm_data = iwl_parse_nvm_sections(mvm); + else + mvm->nvm_data = + iwl_parse_eeprom_data(mvm->trans->dev, + mvm->cfg, + mvm->eeprom_blob, + mvm->eeprom_blob_size); + + if (!mvm->nvm_data) { + kfree(mvm->eeprom_blob); + 
mvm->eeprom_blob = NULL; + ret = -ENOMEM; + } + + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c new file mode 100644 index 000000000000..aa59adf87db3 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -0,0 +1,682 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include <linux/module.h> +#include <net/mac80211.h> + +#include "iwl-notif-wait.h" +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "iwl-fw.h" +#include "iwl-debug.h" +#include "iwl-drv.h" +#include "iwl-modparams.h" +#include "mvm.h" +#include "iwl-phy-db.h" +#include "iwl-eeprom-parse.h" +#include "iwl-csr.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "rs.h" +#include "fw-api-scan.h" +#include "time-event.h" + +/* + * module name, copyright, version, etc. + */ +#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" + +#define DRV_VERSION IWLWIFI_VERSION + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +static const struct iwl_op_mode_ops iwl_mvm_ops; + +struct iwl_mvm_mod_params iwlmvm_mod_params = { + .power_scheme = IWL_POWER_SCHEME_BPS, + /* rest of fields are 0 by default */ +}; + +module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO); +MODULE_PARM_DESC(init_dbg, + "set to true to debug an ASSERT in INIT fw (default: false"); +module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO); +MODULE_PARM_DESC(power_scheme, + "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); + +/* + * module init and exit functions + */ +static int __init iwl_mvm_init(void) +{ + int ret; + + ret = iwl_mvm_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); + + if (ret) { + pr_err("Unable to register MVM op_mode: %d\n", ret); + iwl_mvm_rate_control_unregister(); + } + + return ret; +} +module_init(iwl_mvm_init); + +static void __exit iwl_mvm_exit(void) +{ + iwl_opmode_deregister("iwlmvm"); + iwl_mvm_rate_control_unregister(); +} +module_exit(iwl_mvm_exit); + +static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; + u32 reg_val = 0; + + /* + * We can't upload the correct value to the INIT image + * as we don't have nvm_data by that time. 
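iwl_mvm_init() above registers the rate-control algorithm first and the op_mode second, and rolls the first registration back if the second fails, so a failed module load leaves nothing registered. A minimal sketch of that register/roll-back shape, with stub functions in place of the real registration calls:

#include <stdio.h>

/* Stubs standing in for iwl_mvm_rate_control_register() and friends. */
static int register_rate_control(void) { return 0; }
static void unregister_rate_control(void) { }
static int register_op_mode(const char *name) { (void)name; return -1; /* force failure */ }

static int mvm_init_like(void)
{
	int ret = register_rate_control();

	if (ret) {
		fprintf(stderr, "Unable to register rate control: %d\n", ret);
		return ret;
	}

	ret = register_op_mode("iwlmvm");
	if (ret) {
		fprintf(stderr, "Unable to register op_mode: %d\n", ret);
		/* undo the earlier step so nothing is left half-initialized */
		unregister_rate_control();
	}
	return ret;
}

int main(void)
{
	printf("init returned %d\n", mvm_init_like());
	return 0;
}

The exit path mirrors this in reverse order, deregistering the op_mode before the rate-control algorithm.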
+ * + * TODO: Figure out what we should do here + */ + if (mvm->nvm_data) { + radio_cfg_type = mvm->nvm_data->radio_cfg_type; + radio_cfg_step = mvm->nvm_data->radio_cfg_step; + radio_cfg_dash = mvm->nvm_data->radio_cfg_dash; + } else { + radio_cfg_type = 0; + radio_cfg_step = 0; + radio_cfg_dash = 0; + } + + /* SKU control */ + reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; + reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; + + /* radio configuration */ + reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; + reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; + reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; + + WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) & + ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE); + + /* silicon bits */ + reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; + reg_val |= CSR_HW_IF_CONFIG_REG_BIT_MAC_SI; + + iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | + CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | + CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | + CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | + CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI, + reg_val); + + IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, + radio_cfg_step, radio_cfg_dash); + + /* + * W/A : NIC is stuck in a reset state after Early PCIe power off + * (PCIe power is lost before PERST# is asserted), causing ME FW + * to lose ownership and not being able to obtain it back. + */ + iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, + ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); +} + +struct iwl_rx_handlers { + u8 cmd_id; + bool async; + int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +}; + +#define RX_HANDLER(_cmd_id, _fn, _async) \ + { .cmd_id = _cmd_id , .fn = _fn , .async = _async } + +/* + * Handlers for fw notifications + * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME + * This list should be in order of frequency for performance purposes. + * + * The handler can be SYNC - this means that it will be called in the Rx path + * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and + * only in this case!), it should be set as ASYNC. In that case, it will be + * called from a worker with mvm->mutex held. 
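The SYNC/ASYNC split described above is a defer-to-worker pattern: the RX path queues an entry on a list under a short lock, and a worker later splices the list out and handles the entries while holding the big lock. A userspace sketch of the same shape, with pthread mutexes standing in for the spinlock, the work item and mvm->mutex (list order ignored for brevity):

/* link with -pthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int cmd;
	struct entry *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~async_handlers_lock */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~mvm->mutex */
static struct entry *pending;

/* RX path: must not sleep, so only queue the work under the short lock. */
static void rx_dispatch(int cmd)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->cmd = cmd;
	pthread_mutex_lock(&list_lock);
	e->next = pending;
	pending = e;
	pthread_mutex_unlock(&list_lock);
}

/* Worker: splice the list out under the short lock, handle under the big lock. */
static void *worker(void *arg)
{
	struct entry *local, *e;

	(void)arg;
	pthread_mutex_lock(&big_lock);
	pthread_mutex_lock(&list_lock);
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((e = local) != NULL) {
		local = e->next;
		printf("handling cmd 0x%x with the big lock held\n", e->cmd);
		free(e);
	}
	pthread_mutex_unlock(&big_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	rx_dispatch(0x88);
	rx_dispatch(0xc0);
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}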
+ */ +static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { + RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false), + RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false), + RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), + RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), + RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), + + RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), + RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false), + + RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false), + RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), + + RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), +}; +#undef RX_HANDLER +#define CMD(x) [x] = #x + +static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { + CMD(MVM_ALIVE), + CMD(REPLY_ERROR), + CMD(INIT_COMPLETE_NOTIF), + CMD(PHY_CONTEXT_CMD), + CMD(MGMT_MCAST_KEY), + CMD(TX_CMD), + CMD(TXPATH_FLUSH), + CMD(MAC_CONTEXT_CMD), + CMD(TIME_EVENT_CMD), + CMD(TIME_EVENT_NOTIFICATION), + CMD(BINDING_CONTEXT_CMD), + CMD(TIME_QUOTA_CMD), + CMD(RADIO_VERSION_NOTIFICATION), + CMD(SCAN_REQUEST_CMD), + CMD(SCAN_ABORT_CMD), + CMD(SCAN_START_NOTIFICATION), + CMD(SCAN_RESULTS_NOTIFICATION), + CMD(SCAN_COMPLETE_NOTIFICATION), + CMD(NVM_ACCESS_CMD), + CMD(PHY_CONFIGURATION_CMD), + CMD(CALIB_RES_NOTIF_PHY_DB), + CMD(SET_CALIB_DEFAULT_CMD), + CMD(CALIBRATION_COMPLETE_NOTIFICATION), + CMD(ADD_STA), + CMD(REMOVE_STA), + CMD(LQ_CMD), + CMD(SCAN_OFFLOAD_CONFIG_CMD), + CMD(SCAN_OFFLOAD_REQUEST_CMD), + CMD(SCAN_OFFLOAD_ABORT_CMD), + CMD(SCAN_OFFLOAD_COMPLETE), + CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), + CMD(POWER_TABLE_CMD), + CMD(WEP_KEY), + CMD(REPLY_RX_PHY_CMD), + CMD(REPLY_RX_MPDU_CMD), + CMD(BEACON_TEMPLATE_CMD), + CMD(STATISTICS_NOTIFICATION), + CMD(TX_ANT_CONFIGURATION_CMD), + CMD(D3_CONFIG_CMD), + CMD(PROT_OFFLOAD_CONFIG_CMD), + CMD(OFFLOADS_QUERY_CMD), + CMD(REMOTE_WAKE_CONFIG_CMD), + CMD(WOWLAN_PATTERNS), + CMD(WOWLAN_CONFIGURATION), + CMD(WOWLAN_TSC_RSC_PARAM), + CMD(WOWLAN_TKIP_PARAM), + CMD(WOWLAN_KEK_KCK_MATERIAL), + CMD(WOWLAN_GET_STATUSES), + CMD(WOWLAN_TX_POWER_PER_DB), + CMD(NET_DETECT_CONFIG_CMD), + CMD(NET_DETECT_PROFILES_QUERY_CMD), + CMD(NET_DETECT_PROFILES_CMD), + CMD(NET_DETECT_HOTSPOTS_CMD), + CMD(NET_DETECT_HOTSPOTS_QUERY_CMD), +}; +#undef CMD + +/* this forward declaration can avoid to export the function */ +static void iwl_mvm_async_handlers_wk(struct work_struct *wk); + +static struct iwl_op_mode * +iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, struct dentry *dbgfs_dir) +{ + struct ieee80211_hw *hw; + struct iwl_op_mode *op_mode; + struct iwl_mvm *mvm; + struct iwl_trans_config trans_cfg = {}; + static const u8 no_reclaim_cmds[] = { + TX_CMD, + }; + int err, scan_size; + + switch (cfg->device_family) { + case IWL_DEVICE_FAMILY_6030: + case IWL_DEVICE_FAMILY_6005: + case IWL_DEVICE_FAMILY_7000: + break; + default: + IWL_ERR(trans, "Trying to load mvm on an unsupported device\n"); + return NULL; + } + + /******************************** + * 1. 
Allocating and configuring HW data + ********************************/ + hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + + sizeof(struct iwl_mvm), + &iwl_mvm_hw_ops); + if (!hw) + return NULL; + + op_mode = hw->priv; + op_mode->ops = &iwl_mvm_ops; + op_mode->trans = trans; + + mvm = IWL_OP_MODE_GET_MVM(op_mode); + mvm->dev = trans->dev; + mvm->trans = trans; + mvm->cfg = cfg; + mvm->fw = fw; + mvm->hw = hw; + + mutex_init(&mvm->mutex); + spin_lock_init(&mvm->async_handlers_lock); + INIT_LIST_HEAD(&mvm->time_event_list); + INIT_LIST_HEAD(&mvm->async_handlers_list); + spin_lock_init(&mvm->time_event_lock); + + INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); + INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); + INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); + + SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); + + /* + * Populate the state variables that the transport layer needs + * to know about. + */ + trans_cfg.op_mode = op_mode; + trans_cfg.no_reclaim_cmds = no_reclaim_cmds; + trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); + trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; + + /* TODO: this should really be a TLV */ + if (cfg->device_family == IWL_DEVICE_FAMILY_7000) + trans_cfg.bc_table_dword = true; + + if (!iwlwifi_mod_params.wd_disable) + trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout; + else + trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED; + + trans_cfg.command_names = iwl_mvm_cmd_strings; + + trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; + trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO; + + snprintf(mvm->hw->wiphy->fw_version, + sizeof(mvm->hw->wiphy->fw_version), + "%s", fw->fw_version); + + /* Configure transport layer */ + iwl_trans_configure(mvm->trans, &trans_cfg); + + trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; + trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); + + /* set up notification wait support */ + iwl_notification_wait_init(&mvm->notif_wait); + + /* Init phy db */ + mvm->phy_db = iwl_phy_db_init(trans); + if (!mvm->phy_db) { + IWL_ERR(mvm, "Cannot init phy_db\n"); + goto out_free; + } + + IWL_INFO(mvm, "Detected %s, REV=0x%X\n", + mvm->cfg->name, mvm->trans->hw_rev); + + err = iwl_trans_start_hw(mvm->trans); + if (err) + goto out_free; + + mutex_lock(&mvm->mutex); + err = iwl_run_init_mvm_ucode(mvm, true); + mutex_unlock(&mvm->mutex); + if (err && !iwlmvm_mod_params.init_dbg) { + IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); + goto out_free; + } + + /* Stop the hw after the ALIVE and NVM has been read */ + if (!iwlmvm_mod_params.init_dbg) + iwl_trans_stop_hw(mvm->trans, false); + + scan_size = sizeof(struct iwl_scan_cmd) + + mvm->fw->ucode_capa.max_probe_length + + (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)); + mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); + if (!mvm->scan_cmd) + goto out_free; + + err = iwl_mvm_mac_setup_register(mvm); + if (err) + goto out_free; + + err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir); + if (err) + goto out_unregister; + + return op_mode; + + out_unregister: + ieee80211_unregister_hw(mvm->hw); + out_free: + iwl_phy_db_free(mvm->phy_db); + kfree(mvm->scan_cmd); + kfree(mvm->eeprom_blob); + iwl_trans_stop_hw(trans, true); + ieee80211_free_hw(mvm->hw); + return NULL; +} + +static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + int i; + + iwl_mvm_leds_exit(mvm); + + ieee80211_unregister_hw(mvm->hw); + + kfree(mvm->scan_cmd); + + iwl_trans_stop_hw(mvm->trans, true); + + 
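The start path above asks ieee80211_alloc_hw() for sizeof(struct iwl_op_mode) + sizeof(struct iwl_mvm), so one allocation carries both objects and IWL_OP_MODE_GET_MVM() resolves the mvm from the op_mode's trailing private area. A simplified sketch of that single-allocation layout, with the types reduced to a bare minimum (not the real definitions):

#include <stdio.h>
#include <stdlib.h>

struct op_mode {
	const void *ops;
	char specific[];	/* private area; ~what IWL_OP_MODE_GET_MVM() returns */
};

struct mvm {
	int hw_rev;
};

#define GET_MVM(om)	((struct mvm *)(om)->specific)

int main(void)
{
	/* one block for both structures, like hw->priv in the start path */
	struct op_mode *om = calloc(1, sizeof(*om) + sizeof(struct mvm));

	if (!om)
		return 1;
	GET_MVM(om)->hw_rev = 0x7000;
	printf("mvm hw_rev = 0x%x\n", GET_MVM(om)->hw_rev);
	free(om);
	return 0;
}

Keeping the op_mode and the mvm in one block means a single free (via ieee80211_free_hw()) releases both, which is what the error unwind above relies on.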
iwl_phy_db_free(mvm->phy_db); + mvm->phy_db = NULL; + + kfree(mvm->eeprom_blob); + iwl_free_nvm_data(mvm->nvm_data); + for (i = 0; i < NVM_NUM_OF_SECTIONS; i++) + kfree(mvm->nvm_sections[i].data); + + ieee80211_free_hw(mvm->hw); +} + +struct iwl_async_handler_entry { + struct list_head list; + struct iwl_rx_cmd_buffer rxb; + int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +}; + +void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) +{ + struct iwl_async_handler_entry *entry, *tmp; + + spin_lock_bh(&mvm->async_handlers_lock); + list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } + spin_unlock_bh(&mvm->async_handlers_lock); +} + +static void iwl_mvm_async_handlers_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, async_handlers_wk); + struct iwl_async_handler_entry *entry, *tmp; + struct list_head local_list; + + INIT_LIST_HEAD(&local_list); + + /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ + mutex_lock(&mvm->mutex); + + /* + * Sync with Rx path with a lock. Remove all the entries from this list, + * add them to a local one (lock free), and then handle them. + */ + spin_lock_bh(&mvm->async_handlers_lock); + list_splice_init(&mvm->async_handlers_list, &local_list); + spin_unlock_bh(&mvm->async_handlers_lock); + + list_for_each_entry_safe(entry, tmp, &local_list, list) { + if (entry->fn(mvm, &entry->rxb, NULL)) + IWL_WARN(mvm, + "returned value from ASYNC handlers are ignored\n"); + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + u8 i; + + /* + * Do the notification wait before RX handlers so + * even if the RX handler consumes the RXB we have + * access to it in the notification wait entry. + */ + iwl_notification_wait_notify(&mvm->notif_wait, pkt); + + for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) { + const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; + struct iwl_async_handler_entry *entry; + + if (rx_h->cmd_id != pkt->hdr.cmd) + continue; + + if (!rx_h->async) + return rx_h->fn(mvm, rxb, cmd); + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + /* we can't do much... 
*/ + if (!entry) + return 0; + + entry->rxb._page = rxb_steal_page(rxb); + entry->rxb._offset = rxb->_offset; + entry->rxb._rx_page_order = rxb->_rx_page_order; + entry->fn = rx_h->fn; + spin_lock(&mvm->async_handlers_lock); + list_add_tail(&entry->list, &mvm->async_handlers_list); + spin_unlock(&mvm->async_handlers_lock); + schedule_work(&mvm->async_handlers_wk); + break; + } + + return 0; +} + +static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + int mq = mvm->queue_to_mac80211[queue]; + + if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) + return; + + if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) { + IWL_DEBUG_TX_QUEUES(mvm, + "queue %d (mac80211 %d) already stopped\n", + queue, mq); + return; + } + + set_bit(mq, &mvm->transport_queue_stop); + ieee80211_stop_queue(mvm->hw, mq); +} + +static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + int mq = mvm->queue_to_mac80211[queue]; + + if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) + return; + + if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) { + IWL_DEBUG_TX_QUEUES(mvm, + "queue %d (mac80211 %d) already awake\n", + queue, mq); + return; + } + + clear_bit(mq, &mvm->transport_queue_stop); + + ieee80211_wake_queue(mvm->hw, mq); +} + +static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + if (state) + set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); + else + clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); + + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); +} + +static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_tx_info *info; + + info = IEEE80211_SKB_CB(skb); + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + ieee80211_free_txskb(mvm->hw, skb); +} + +static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + iwl_mvm_dump_nic_error_log(mvm); + + iwl_abort_notification_waits(&mvm->notif_wait); + + /* + * If we're restarting already, don't cycle restarts. + * If INIT fw asserted, it will likely fail again. + * If WoWLAN fw asserted, don't restart either, mac80211 + * can't recover this since we're already half suspended. + */ + if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n"); + } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && + iwlwifi_mod_params.restart_fw) { + /* + * This is a bit racy, but worst case we tell mac80211 about + * a stopped/aborted (sched) scan when that was already done + * which is not a problem. It is necessary to abort any scan + * here because mac80211 requires having the scan cleared + * before restarting. + * We'll reset the scan_status to NONE in restart cleanup in + * the next start() call from mac80211. 
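iwl_mvm_stop_sw_queue() and iwl_mvm_wake_sw_queue() above only stop a mac80211 queue on the 0 -> 1 transition of a per-queue counter and wake it on the 1 -> 0 transition, so several transport queues mapped to one mac80211 queue cannot wake it prematurely. A single-threaded sketch of that counting (the driver uses atomic_inc_return()/atomic_dec_return() on a per-queue atomic_t):

#include <stdio.h>

static int stop_count;	/* one counter per mac80211 queue in the driver */

static void stop_queue(void)
{
	if (++stop_count > 1) {
		printf("already stopped (count=%d)\n", stop_count);
		return;
	}
	printf("stopping mac80211 queue\n");
}

static void wake_queue(void)
{
	if (--stop_count > 0) {
		printf("still stopped (count=%d)\n", stop_count);
		return;
	}
	printf("waking mac80211 queue\n");
}

int main(void)
{
	stop_queue();	/* stops the queue */
	stop_queue();	/* second stopper: already stopped */
	wake_queue();	/* one stopper left: stays stopped */
	wake_queue();	/* last stopper gone: wakes the queue */
	return 0;
}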
+ */ + switch (mvm->scan_status) { + case IWL_MVM_SCAN_NONE: + break; + case IWL_MVM_SCAN_OS: + ieee80211_scan_completed(mvm->hw, true); + break; + } + + ieee80211_restart_hw(mvm->hw); + } +} + +static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) +{ + WARN_ON(1); +} + +static const struct iwl_op_mode_ops iwl_mvm_ops = { + .start = iwl_op_mode_mvm_start, + .stop = iwl_op_mode_mvm_stop, + .rx = iwl_mvm_rx_dispatch, + .queue_full = iwl_mvm_stop_sw_queue, + .queue_not_full = iwl_mvm_wake_sw_queue, + .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, + .free_skb = iwl_mvm_free_skb, + .nic_error = iwl_mvm_nic_error, + .cmd_queue_full = iwl_mvm_cmd_queue_full, + .nic_config = iwl_mvm_nic_config, +}; diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c new file mode 100644 index 000000000000..b428448f8ddf --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c @@ -0,0 +1,292 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <net/mac80211.h> +#include "fw-api.h" +#include "mvm.h" + +/* Maps the driver specific channel width definition to the the fw values */ +static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + return PHY_VHT_CHANNEL_MODE20; + case NL80211_CHAN_WIDTH_40: + return PHY_VHT_CHANNEL_MODE40; + case NL80211_CHAN_WIDTH_80: + return PHY_VHT_CHANNEL_MODE80; + case NL80211_CHAN_WIDTH_160: + return PHY_VHT_CHANNEL_MODE160; + default: + WARN(1, "Invalid channel width=%u", chandef->width); + return PHY_VHT_CHANNEL_MODE20; + } +} + +/* + * Maps the driver specific control channel position (relative to the center + * freq) definitions to the the fw values + */ +static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) +{ + switch (chandef->chan->center_freq - chandef->center_freq1) { + case -70: + return PHY_VHT_CTRL_POS_4_BELOW; + case -50: + return PHY_VHT_CTRL_POS_3_BELOW; + case -30: + return PHY_VHT_CTRL_POS_2_BELOW; + case -10: + return PHY_VHT_CTRL_POS_1_BELOW; + case 10: + return PHY_VHT_CTRL_POS_1_ABOVE; + case 30: + return PHY_VHT_CTRL_POS_2_ABOVE; + case 50: + return PHY_VHT_CTRL_POS_3_ABOVE; + case 70: + return PHY_VHT_CTRL_POS_4_ABOVE; + default: + WARN(1, "Invalid channel definition"); + case 0: + /* + * The FW is expected to check the control channel position only + * when in HT/VHT and the channel width is not 20MHz. Return + * this value as the default one. + */ + return PHY_VHT_CTRL_POS_1_BELOW; + } +} + +/* + * Construct the generic fields of the PHY context command + */ +static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, + struct iwl_phy_context_cmd *cmd, + u32 action, u32 apply_time) +{ + memset(cmd, 0, sizeof(struct iwl_phy_context_cmd)); + + cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, + ctxt->color)); + cmd->action = cpu_to_le32(action); + cmd->apply_time = cpu_to_le32(apply_time); +} + +/* + * Add the phy configuration to the PHY context command + */ +static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, + struct iwl_phy_context_cmd *cmd, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic) +{ + u8 valid_rx_chains, active_cnt, idle_cnt; + + /* Set the channel info data */ + cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? + PHY_BAND_24 : PHY_BAND_5); + + cmd->ci.channel = chandef->chan->hw_value; + cmd->ci.width = iwl_mvm_get_channel_width(chandef); + cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); + + /* Set rx the chains */ + + /* TODO: + * Need to add on chain noise calibration limitations, and + * BT coex considerations. 
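iwl_mvm_get_ctrl_pos() above maps the offset between the control channel and center_freq1 (+/-10, 30, 50 or 70 MHz) to a firmware control-position code. A standalone sketch of the same mapping, with illustrative codes in place of the real PHY_VHT_CTRL_POS_* values:

#include <stdio.h>

/* Illustrative codes only; the real values come from the fw API headers. */
enum ctrl_pos {
	POS_4_BELOW, POS_3_BELOW, POS_2_BELOW, POS_1_BELOW,
	POS_1_ABOVE, POS_2_ABOVE, POS_3_ABOVE, POS_4_ABOVE,
};

static enum ctrl_pos ctrl_pos(int control_freq, int center_freq1)
{
	switch (control_freq - center_freq1) {
	case -70: return POS_4_BELOW;
	case -50: return POS_3_BELOW;
	case -30: return POS_2_BELOW;
	case -10: return POS_1_BELOW;
	case  10: return POS_1_ABOVE;
	case  30: return POS_2_ABOVE;
	case  50: return POS_3_ABOVE;
	case  70: return POS_4_ABOVE;
	default:  return POS_1_BELOW;	/* 20 MHz: position is ignored */
	}
}

int main(void)
{
	/* 80 MHz centered on 5210 MHz with the control channel on 5180 MHz */
	printf("ctrl pos code = %d\n", ctrl_pos(5180, 5210));	/* -30 -> POS_2_BELOW */
	return 0;
}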
+ */ + valid_rx_chains = mvm->nvm_data->valid_rx_ant; + idle_cnt = chains_static; + active_cnt = chains_dynamic; + + cmd->rxchain_info = cpu_to_le32(valid_rx_chains << + PHY_RX_CHAIN_VALID_POS); + cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); + cmd->rxchain_info |= cpu_to_le32(active_cnt << + PHY_RX_CHAIN_MIMO_CNT_POS); + + cmd->txchain_info = cpu_to_le32(mvm->nvm_data->valid_tx_ant); +} + +/* + * Send a command to apply the current phy configuration. The command is send + * only if something in the configuration changed: in case that this is the + * first time that the phy configuration is applied or in case that the phy + * configuration changed from the previous apply. + */ +static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic, + u32 action, u32 apply_time) +{ + struct iwl_phy_context_cmd cmd; + int ret; + + /* Set the command header fields */ + iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); + + /* Set the command data */ + iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, + chains_static, chains_dynamic); + + ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC, + sizeof(struct iwl_phy_context_cmd), + &cmd); + if (ret) + IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); + return ret; +} + + +struct phy_ctx_used_data { + unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)]; +}; + +static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + void *_data) +{ + struct phy_ctx_used_data *data = _data; + struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + + __set_bit(phy_ctxt->id, data->used); +} + +/* + * Send a command to add a PHY context based on the current HW configuration. + */ +int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic) +{ + struct phy_ctx_used_data data = { + .used = { }, + }; + + /* + * If this is a regular PHY context (not the ROC one) + * skip the ROC PHY context's ID. + */ + if (ctxt != &mvm->phy_ctxt_roc) + __set_bit(mvm->phy_ctxt_roc.id, data.used); + + lockdep_assert_held(&mvm->mutex); + ctxt->color++; + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + ieee80211_iter_chan_contexts_atomic( + mvm->hw, iwl_mvm_phy_ctx_used_iter, &data); + + ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX); + if (WARN_ONCE(ctxt->id == NUM_PHY_CTX, + "Failed to init PHY context - no free ID!\n")) + return -EIO; + } + + ctxt->channel = chandef->chan; + return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_ADD, 0); +} + +/* + * Send a command to modify the PHY context based on the current HW + * configuration. Note that the function does not check that the configuration + * changed. + */ +int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic) +{ + lockdep_assert_held(&mvm->mutex); + + ctxt->channel = chandef->chan; + return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_MODIFY, 0); +} + +/* + * Send a command to the FW to remove the given phy context. 
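iwl_mvm_phy_ctxt_add() above collects the IDs already used by existing channel contexts (plus the reserved ROC context) into a bitmap and takes the first clear bit as the new PHY context ID. A small sketch of that allocation, open-coding find_first_zero_bit() for a single word and an illustrative context count:

#include <stdio.h>

#define N_CTX	8	/* illustrative, not the driver's NUM_PHY_CTX */

/* Lowest clear bit, the way find_first_zero_bit() is used above. */
static int first_free_id(unsigned long used)
{
	int id;

	for (id = 0; id < N_CTX; id++)
		if (!(used & (1UL << id)))
			return id;
	return N_CTX;	/* no free ID: the caller above returns -EIO */
}

int main(void)
{
	unsigned long used = 0;

	used |= 1UL << 0;	/* taken by an existing channel context */
	used |= 1UL << 1;	/* reserved for the ROC PHY context */
	printf("new PHY context id = %d\n", first_free_id(used));	/* 2 */
	return 0;
}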
+ * Once the command is sent, regardless of success or failure, the context is + * marked as invalid + */ +void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) +{ + struct iwl_phy_context_cmd cmd; + int ret; + + lockdep_assert_held(&mvm->mutex); + + iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0); + ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC, + sizeof(struct iwl_phy_context_cmd), + &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n", + ctxt->id); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c new file mode 100644 index 000000000000..5a92a4978795 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -0,0 +1,207 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +#include <net/mac80211.h> + +#include "iwl-debug.h" +#include "mvm.h" +#include "iwl-modparams.h" +#include "fw-api-power.h" + +#define POWER_KEEP_ALIVE_PERIOD_SEC 25 + +static void iwl_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_powertable_cmd *cmd) +{ + struct ieee80211_hw *hw = mvm->hw; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *chan; + int dtimper, dtimper_msec; + int keep_alive; + bool radar_detect = false; + + cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd->action = cpu_to_le32(FW_CTXT_ACTION_MODIFY); + + if ((!vif->bss_conf.ps) || + (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)) + return; + + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); + + dtimper = hw->conf.ps_dtim_period ?: 1; + + /* Check if radar detection is required on current channel */ + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + WARN_ON(!chanctx_conf); + if (chanctx_conf) { + chan = chanctx_conf->def.chan; + radar_detect = chan->flags & IEEE80211_CHAN_RADAR; + } + rcu_read_unlock(); + + /* Check skip over DTIM conditions */ + if (!radar_detect && (dtimper <= 10) && + (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_SLEEP_OVER_DTIM_MSK); + cmd->num_skip_dtim = 2; + } + + /* Check that keep alive period is at least 3 * DTIM */ + dtimper_msec = dtimper * vif->bss_conf.beacon_int; + keep_alive = max_t(int, 3 * dtimper_msec, + MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC); + keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); + + cmd->keep_alive_seconds = cpu_to_le16(keep_alive); + + if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP) { + /* TODO: Also for D3 (device sleep / WoWLAN) */ + cmd->rx_data_timeout = cpu_to_le32(10); + cmd->tx_data_timeout = cpu_to_le32(10); + } else { + cmd->rx_data_timeout = cpu_to_le32(50); + cmd->tx_data_timeout = cpu_to_le32(50); + } +} + +int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_powertable_cmd cmd = {}; + + if (!iwlwifi_mod_params.power_save) { + IWL_DEBUG_POWER(mvm, "Power management is not allowed\n"); + return 0; + } + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + iwl_power_build_cmd(mvm, vif, &cmd); + + IWL_DEBUG_POWER(mvm, + "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", + cmd.id_and_color, iwlmvm_mod_params.power_scheme, + le16_to_cpu(cmd.flags)); + + if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { + IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", + le16_to_cpu(cmd.keep_alive_seconds)); + IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", + 
le32_to_cpu(cmd.rx_data_timeout)); + IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", + le32_to_cpu(cmd.tx_data_timeout)); + IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", + le32_to_cpu(cmd.rx_data_timeout_uapsd)); + IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", + le32_to_cpu(cmd.tx_data_timeout_uapsd)); + IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", + cmd.lprx_rssi_threshold); + IWL_DEBUG_POWER(mvm, "DTIMs to skip = %u\n", cmd.num_skip_dtim); + } + + return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, + sizeof(cmd), &cmd); +} + +int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_powertable_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (!iwlwifi_mod_params.power_save) { + IWL_DEBUG_POWER(mvm, "Power management is not allowed\n"); + return 0; + } + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY); + + IWL_DEBUG_POWER(mvm, + "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", + cmd.id_and_color, iwlmvm_mod_params.power_scheme, + le16_to_cpu(cmd.flags)); + + return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, + sizeof(cmd), &cmd); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_powertable_cmd *cmd) +{ + iwl_power_build_cmd(mvm, vif, cmd); +} +#endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c new file mode 100644 index 000000000000..925628468146 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/quota.c @@ -0,0 +1,197 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
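The keep-alive computation in iwl_power_build_cmd() above takes at least three DTIM intervals, never less than 25 seconds, and rounds up to whole seconds. A worked standalone sketch of that arithmetic (beacon interval treated as milliseconds, as the code above does):

#include <stdio.h>

#define KEEP_ALIVE_SEC	25
#define MSEC_PER_SEC	1000

static int keep_alive_seconds(int dtim_period, int beacon_int_ms)
{
	int dtim_msec = dtim_period * beacon_int_ms;
	int ka = 3 * dtim_msec;

	if (ka < KEEP_ALIVE_SEC * MSEC_PER_SEC)
		ka = KEEP_ALIVE_SEC * MSEC_PER_SEC;
	return (ka + MSEC_PER_SEC - 1) / MSEC_PER_SEC;	/* DIV_ROUND_UP */
}

int main(void)
{
	printf("%d\n", keep_alive_seconds(2, 100));	/* 3 * 200 ms, floor wins -> 25 */
	printf("%d\n", keep_alive_seconds(10, 500));	/* 3 * 5000 ms = 15 s -> 25 */
	printf("%d\n", keep_alive_seconds(20, 1000));	/* 3 * 20000 ms = 60 s -> 60 */
	return 0;
}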
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <net/mac80211.h> +#include "fw-api.h" +#include "mvm.h" + +struct iwl_mvm_quota_iterator_data { + int n_interfaces[MAX_BINDINGS]; + int colors[MAX_BINDINGS]; + struct ieee80211_vif *new_vif; +}; + +static void iwl_mvm_quota_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_quota_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 id; + + /* + * We'll account for the new interface (if any) below, + * skip it here in case we're not called from within + * the add_interface callback (otherwise it won't show + * up in iteration) + */ + if (vif == data->new_vif) + return; + + if (!mvmvif->phy_ctxt) + return; + + /* currently, PHY ID == binding ID */ + id = mvmvif->phy_ctxt->id; + + /* need at least one binding per PHY */ + BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS); + + if (WARN_ON_ONCE(id >= MAX_BINDINGS)) + return; + + if (data->colors[id] < 0) + data->colors[id] = mvmvif->phy_ctxt->color; + else + WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (vif->bss_conf.assoc) + data->n_interfaces[id]++; + break; + case NL80211_IFTYPE_AP: + if (mvmvif->ap_active) + data->n_interfaces[id]++; + break; + case NL80211_IFTYPE_MONITOR: + data->n_interfaces[id]++; + break; + case NL80211_IFTYPE_P2P_DEVICE: + break; + case NL80211_IFTYPE_ADHOC: + if (vif->bss_conf.ibss_joined) + data->n_interfaces[id]++; + break; + default: + WARN_ON_ONCE(1); + break; + } +} + +int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) +{ + struct iwl_time_quota_cmd cmd; + int i, idx, ret, num_active_bindings, quota, quota_rem; + struct iwl_mvm_quota_iterator_data data = { + .n_interfaces = {}, + .colors = { -1, -1, -1, -1 }, + .new_vif = newvif, + }; + + /* update all upon completion */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return 0; + + BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1); + + lockdep_assert_held(&mvm->mutex); + + memset(&cmd, 0, sizeof(cmd)); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_quota_iterator, &data); + if (newvif) { + data.new_vif = NULL; + iwl_mvm_quota_iterator(&data, 
newvif->addr, newvif); + } + + /* + * The FW's scheduling session consists of + * IWL_MVM_MAX_QUOTA fragments. Divide these fragments + * equally between all the bindings that require quota + */ + num_active_bindings = 0; + for (i = 0; i < MAX_BINDINGS; i++) { + cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); + if (data.n_interfaces[i] > 0) + num_active_bindings++; + } + + if (!num_active_bindings) + goto send_cmd; + + quota = IWL_MVM_MAX_QUOTA / num_active_bindings; + quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings; + + for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { + if (data.n_interfaces[i] <= 0) + continue; + + cmd.quotas[idx].id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); + cmd.quotas[idx].quota = cpu_to_le32(quota); + cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); + idx++; + } + + /* Give the remainder of the session to the first binding */ + le32_add_cpu(&cmd.quotas[0].quota, quota_rem); + +send_cmd: + ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send quota: %d\n", ret); + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c new file mode 100644 index 000000000000..56b636d9ab30 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -0,0 +1,3080 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
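iwl_mvm_update_quotas() above splits the scheduling session's IWL_MVM_MAX_QUOTA fragments equally among the bindings that have at least one active interface, and hands the integer-division remainder to the first active binding. A standalone sketch of that split, with an illustrative fragment count:

#include <stdio.h>

#define MAX_BINDINGS	4
#define MAX_QUOTA	128	/* illustrative stand-in for IWL_MVM_MAX_QUOTA */

int main(void)
{
	int n_interfaces[MAX_BINDINGS] = { 2, 0, 1, 1 };	/* per-binding interface counts */
	int quotas[MAX_BINDINGS] = { 0 };
	int i, idx = 0, active = 0;

	for (i = 0; i < MAX_BINDINGS; i++)
		if (n_interfaces[i] > 0)
			active++;

	if (active) {
		int quota = MAX_QUOTA / active;
		int rem = MAX_QUOTA % active;

		for (i = 0; i < MAX_BINDINGS; i++) {
			if (n_interfaces[i] <= 0)
				continue;
			quotas[idx++] = quota;
		}
		quotas[0] += rem;	/* first active binding absorbs the remainder */
	}

	for (i = 0; i < idx; i++)
		printf("binding slot %d: %d fragments\n", i, quotas[i]);
	/* 128 / 3 = 42 each; slot 0 gets 42 + 2 = 44, so all 128 are assigned */
	return 0;
}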
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <net/mac80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> +#include "rs.h" +#include "fw-api.h" +#include "sta.h" +#include "iwl-op-mode.h" +#include "mvm.h" + +#define RS_NAME "iwl-mvm-rs" + +#define NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ +#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ +#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ + +/* max allowed rate miss before sync LQ cmd */ +#define IWL_MISSED_RATE_MAX 15 +/* max time to accum history 2 seconds */ +#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +static const u8 ant_toggle_lookup[] = { + /*ANT_NONE -> */ ANT_NONE, + /*ANT_A -> */ ANT_B, + /*ANT_B -> */ ANT_C, + /*ANT_AB -> */ ANT_BC, + /*ANT_C -> */ ANT_A, + /*ANT_AC -> */ ANT_AB, + /*ANT_BC -> */ ANT_AC, + /*ANT_ABC -> */ ANT_ABC, +}; + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO2_##s##M_PLCP,\ + IWL_RATE_MIMO3_##s##M_PLCP,\ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ + /* FIXME:RS: ^^ should be INV (legacy) */ +}; + +static inline u8 rs_extract_rate(u32 rate_n_flags) +{ + /* also works for HT because bits 7:6 are zero there */ + return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK); +} + +static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + /* HT rate format */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = rs_extract_rate(rate_n_flags); + + if (idx >= IWL_RATE_MIMO3_6M_PLCP) + idx = idx - 
IWL_RATE_MIMO3_6M_PLCP; + else if (idx >= IWL_RATE_MIMO2_6M_PLCP) + idx = idx - IWL_RATE_MIMO2_6M_PLCP; + + idx += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (idx >= IWL_RATE_9M_INDEX) + idx += 1; + if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) + return idx; + + /* legacy rate format, search for match in table */ + } else { + for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) + if (iwl_rates[idx].plcp == + rs_extract_rate(rate_n_flags)) + return idx; + } + + return -1; +} + +static void rs_rate_scale_perform(struct iwl_mvm *mvm, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta); +static void rs_fill_link_cmd(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, u32 rate_n_flags); +static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); + + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index); +#else +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{} +#endif + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. + * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. + */ + +static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 +}; + +static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */ + {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */ +}; + +static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ + {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */ + {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */ + {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */ + {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/ +}; + +static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ + {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */ + {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */ + {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */ + {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */ + {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */ + {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */ + {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */ + {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */ +}; + +/* mbps, mcs */ 
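The four rows in each HT table above correspond to normal, SGI, aggregated, and aggregated+SGI transmissions. A minimal standalone sketch of that lookup, using illustrative names (tpt_siso20 and expected_tpt are not the driver's identifiers), mirroring the row selection rs_set_expected_tpt_table() performs further down:

#include <stdio.h>

#define RATE_COUNT 13

/* Same layout as the tables above: [2 * is_agg + is_sgi][rate index] */
static const int tpt_siso20[4][RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202},	/* Norm */
	{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210},	/* SGI */
	{0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362},	/* AGG */
	{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390},/* AGG+SGI */
};

/* SGI selects the odd rows, aggregation selects the upper pair */
static int expected_tpt(int is_sgi, int is_agg, int rate_idx)
{
	return tpt_siso20[2 * !!is_agg + !!is_sgi][rate_idx];
}

int main(void)
{
	/* 60 Mbps entry (index 12) with aggregation and short GI: 390 */
	printf("%d\n", expected_tpt(1, 1, 12));
	return 0;
}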
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 3/4"}, + { "24", "16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +#define MCS_INDEX_PER_STREAM (8) + +static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +/* + * removes the old data from the statistics. All data that is older than + * TID_MAX_TIME_DIFF, will be deleted. + */ +static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) +{ + /* The oldest age we want to keep */ + u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; + + while (tl->queue_count && + (tl->time_stamp < oldest_time)) { + tl->total -= tl->packet_count[tl->head]; + tl->packet_count[tl->head] = 0; + tl->time_stamp += TID_QUEUE_CELL_SPACING; + tl->queue_count--; + tl->head++; + if (tl->head >= TID_QUEUE_MAX_SIZE) + tl->head = 0; + } +} + +/* + * increment traffic load value for tid and also remove + * any old values if passed the certain time period + */ +static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, + struct ieee80211_hdr *hdr) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + u8 tid; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } else { + return IWL_MAX_TID_COUNT; + } + + if (unlikely(tid >= IWL_MAX_TID_COUNT)) + return IWL_MAX_TID_COUNT; + + tl = &lq_data->load[tid]; + + curr_time -= curr_time % TID_ROUND_VALUE; + + /* Happens only for the first packet. Initialize the data */ + if (!(tl->queue_count)) { + tl->total = 1; + tl->time_stamp = curr_time; + tl->queue_count = 1; + tl->head = 0; + tl->packet_count[0] = 1; + return IWL_MAX_TID_COUNT; + } + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + rs_tl_rm_old_stats(tl, curr_time); + + index = (tl->head + index) % TID_QUEUE_MAX_SIZE; + tl->packet_count[index] = tl->packet_count[index] + 1; + tl->total = tl->total + 1; + + if ((index + 1) > tl->queue_count) + tl->queue_count = index + 1; + + return tid; +} + +#ifdef CONFIG_MAC80211_DEBUGFS +/** + * Program the device to use fixed rate for frame transmit + * This is for debugging/testing only + * once the device start use fixed rate, we need to reload the module + * to being back the normal operation. 
+ */ +static void rs_program_fix_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta) +{ + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); + + if (lq_sta->dbg_fixed_rate) { + rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); + iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false); + } +} +#endif + +/* + get the traffic load value for tid +*/ +static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + + if (tid >= IWL_MAX_TID_COUNT) + return 0; + + tl = &(lq_data->load[tid]); + + curr_time -= curr_time % TID_ROUND_VALUE; + + if (!(tl->queue_count)) + return 0; + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + rs_tl_rm_old_stats(tl, curr_time); + + return tl->total; +} + +static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_data, u8 tid, + struct ieee80211_sta *sta) +{ + int ret = -EAGAIN; + u32 load; + + load = rs_tl_get_load(lq_data, tid); + + if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { + IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", + sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + } else { + IWL_DEBUG_HT(mvm, + "Aggregation not enabled for tid %d because load = %u\n", + tid, load); + } + return ret; +} + +static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid, + struct iwl_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if (tid < IWL_MAX_TID_COUNT) + rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta); + else + IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n", + tid, IWL_MAX_TID_COUNT); +} + +static inline int get_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + +/** + * rs_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 62 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. 
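rs_collect_tx_data() below maintains this window. A self-contained sketch of the same bookkeeping, with invented names and a small test harness; as in the driver, success_ratio is kept as a percentage scaled by 128:

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 62	/* IWL_RATE_MAX_WINDOW in the driver */

struct tx_window {
	uint64_t data;		/* bitmask, bit 0 = newest frame */
	int counter;		/* frames recorded, capped at WINDOW_SIZE */
	int success_counter;	/* number of bits set in data */
	int success_ratio;	/* 128 * percent */
};

static void window_add(struct tx_window *w, int success)
{
	const uint64_t oldest = 1ULL << (WINDOW_SIZE - 1);

	/* Evict the oldest attempt once the window is full */
	if (w->counter >= WINDOW_SIZE) {
		w->counter = WINDOW_SIZE - 1;
		if (w->data & oldest) {
			w->data &= ~oldest;
			w->success_counter--;
		}
	}
	w->counter++;
	w->data <<= 1;
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}
	w->success_ratio = 128 * (100 * w->success_counter) / w->counter;
}

int main(void)
{
	struct tx_window w = { 0, 0, 0, -1 };

	for (int i = 0; i < 10; i++)
		window_add(&w, i % 2);	/* 50% success */
	printf("ratio = %d (%d%%)\n", w.success_ratio, w.success_ratio / 128);
	return 0;
}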
+ */ +static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) +{ + struct iwl_rate_scale_data *window = NULL; + static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); + s32 fail_count, tpt; + + if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) + return -EINVAL; + + /* Select window for current tx bit rate */ + window = &(tbl->win[scale_index]); + + /* Get expected throughput */ + tpt = get_expected_tpt(tbl, scale_index); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. + * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & mask) { + window->data &= ~mask; + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + window->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + window->success_counter++; + window->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + return 0; +} + +/* + * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
+ */ +/* FIXME:RS:remove this function and put the flags statically in the table */ +static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + u32 rate_n_flags = 0; + + if (is_legacy(tbl->lq_type)) { + rate_n_flags = iwl_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + rate_n_flags |= RATE_MCS_CCK_MSK; + } else if (is_Ht(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) { + IWL_ERR(mvm, "Invalid HT rate index %d\n", index); + index = IWL_LAST_OFDM_RATE; + } + rate_n_flags = RATE_MCS_HT_MSK; + + if (is_siso(tbl->lq_type)) + rate_n_flags |= iwl_rates[index].plcp_siso; + else if (is_mimo2(tbl->lq_type)) + rate_n_flags |= iwl_rates[index].plcp_mimo2; + else + rate_n_flags |= iwl_rates[index].plcp_mimo3; + } else { + IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type); + } + + rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & + RATE_MCS_ANT_ABC_MSK); + + if (is_Ht(tbl->lq_type)) { + if (tbl->is_ht40) + rate_n_flags |= RATE_MCS_CHAN_WIDTH_40; + if (tbl->is_SGI) + rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + rate_n_flags |= RATE_HT_MCS_GF_MSK; + if (is_siso(tbl->lq_type) && tbl->is_SGI) { + rate_n_flags &= ~RATE_MCS_SGI_MSK; + IWL_ERR(mvm, "GF was set with SGI:SISO\n"); + } + } + } + return rate_n_flags; +} + +/* + * Interpret uCode API's rate_n_flags format, + * fill "search" or "active" tx mode table. + */ +static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, + enum ieee80211_band band, + struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); + u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); + u8 mcs; + + memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); + *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); + + if (*rate_idx == IWL_RATE_INVALID) { + *rate_idx = -1; + return -EINVAL; + } + tbl->is_SGI = 0; /* default legacy setup */ + tbl->is_ht40 = 0; + tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); + tbl->lq_type = LQ_NONE; + tbl->max_search = IWL_MAX_SEARCH; + + /* legacy rate format */ + if (!(rate_n_flags & RATE_MCS_HT_MSK)) { + if (num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + } + /* HT rate format */ + } else { + if (rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */ + tbl->is_ht40 = 1; + + mcs = rs_extract_rate(rate_n_flags); + + /* SISO */ + if (mcs <= IWL_RATE_SISO_60M_PLCP) { + if (num_of_ant == 1) + tbl->lq_type = LQ_SISO; /*else NONE*/ + /* MIMO2 */ + } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) { + if (num_of_ant == 2) + tbl->lq_type = LQ_MIMO2; + /* MIMO3 */ + } else { + if (num_of_ant == 3) { + tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; + tbl->lq_type = LQ_MIMO3; + } + } + } + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, + struct iwl_scale_tbl_info *tbl) +{ + u8 new_ant_type; + + if (!tbl->ant_type || tbl->ant_type > ANT_ABC) + return 0; + + if (!rs_is_valid_ant(valid_ant, tbl->ant_type)) + return 0; + + new_ant_type = ant_toggle_lookup[tbl->ant_type]; + + while ((new_ant_type != tbl->ant_type) && + !rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == tbl->ant_type) + return 0; + + tbl->ant_type = new_ant_type; + *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; + *rate_n_flags |= 
new_ant_type << RATE_MCS_ANT_POS; + return 1; +} + +/** + * Green-field mode is valid if the station supports it and + * there are no non-GF stations present in the BSS. + */ +static bool rs_use_green(struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv; + + bool use_green = !(sta_priv->vif->bss_conf.ht_operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + + return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && use_green; +} + +/** + * rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type) +{ + if (is_legacy(rate_type)) { + return lq_sta->active_legacy_rate; + } else { + if (is_siso(rate_type)) + return lq_sta->active_siso_rate; + else if (is_mimo2(rate_type)) + return lq_sta->active_mimo2_rate; + else + return lq_sta->active_mimo3_rate; + } +} + +static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, + int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + u8 scale_index, u8 ht_possible) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_sta->is_green; + struct iwl_mvm *mvm = lq_sta->drv; + + /* check if we need to switch from HT to legacy rates. 
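rs_get_adjacent_rate() above hands back both neighbours packed into one 16-bit value: the lower rate index in the low byte and the higher one in the high byte. A small sketch of that packing convention, with made-up names and a sentinel standing in for IWL_RATE_INVALID:

#include <stdint.h>
#include <stdio.h>

#define NO_RATE 0xff	/* sentinel for "no neighbour" in this sketch */

static uint16_t pack_high_low(uint8_t high, uint8_t low)
{
	return (uint16_t)(high << 8) | low;
}

int main(void)
{
	/* 24 Mbps (index 8) has neighbours 18 Mbps (7) and 36 Mbps (9) */
	uint16_t high_low = pack_high_low(9, 7);
	uint8_t low = high_low & 0xff;
	uint8_t high = (high_low >> 8) & 0xff;

	printf("low=%u high=%u\n", low, high);

	/* the top rate has no higher neighbour */
	high_low = pack_high_low(NO_RATE, 10);
	printf("no higher rate: %d\n", ((high_low >> 8) & 0xff) == NO_RATE);
	return 0;
}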
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_sta->band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (num_of_ant(tbl->ant_type) > 1) + tbl->ant_type = + first_antenna(mvm->nvm_data->valid_tx_ant); + + tbl->is_ht40 = 0; + tbl->is_SGI = 0; + tbl->max_search = IWL_MAX_SEARCH; + } + + rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); + + /* Mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + /* supp_rates has no CCK bits in A mode */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate_mask = (u16)(rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_sta->supp_rates); + } + + /* If we switched from HT to legacy, check current rate */ + if (switch_to_legacy && (rate_mask & (1 << scale_index))) { + low = scale_index; + goto out; + } + + high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask, + tbl->lq_type); + low = high_low & 0xff; + + if (low == IWL_RATE_INVALID) + low = scale_index; + +out: + return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); +} + +/* + * Simple function to compare two rate scale table types + */ +static bool table_type_matches(struct iwl_scale_tbl_info *a, + struct iwl_scale_tbl_info *b) +{ + return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && + (a->is_SGI == b->is_SGI); +} + +/* + * mac80211 sends us Tx status + */ +static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + int legacy_success; + int retries; + int rs_index, mac_index, i; + struct iwl_lq_sta *lq_sta = priv_sta; + struct iwl_lq_cmd *table; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r; + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mac80211_rate_control_flags mac_flags; + u32 tx_rate; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + + IWL_DEBUG_RATE_LIMIT(mvm, + "get frame ack response, update rate scale window\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!lq_sta) { + IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n"); + return; + } else if (!lq_sta->drv) { + IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); + return; + } + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + /* + * Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). 
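A condensed sketch of the resync policy described above, reduced to the rate value alone (the driver also compares SGI, bandwidth, antenna and HT flags) and using invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MISSED_RATE_MAX 15	/* IWL_MISSED_RATE_MAX in the driver */

struct lq_state {
	uint32_t last_lq_rate;	/* rs_table[0] of the last LQ command */
	int missed_rate_counter;
};

/* Returns true when the Tx status should be fed into the rate-scale
 * statistics; false when it belongs to an outdated LQ command. */
static bool tx_status_usable(struct lq_state *lq, uint32_t status_rate,
			     void (*resync)(struct lq_state *))
{
	if (status_rate == lq->last_lq_rate) {
		lq->missed_rate_counter = 0;
		return true;
	}
	if (++lq->missed_rate_counter > MISSED_RATE_MAX) {
		lq->missed_rate_counter = 0;
		resync(lq);	/* e.g. re-send the LQ command */
	}
	return false;
}

static void resync_stub(struct lq_state *lq)
{
	printf("resync LQ with rate 0x%x\n", lq->last_lq_rate);
}

int main(void)
{
	struct lq_state lq = { .last_lq_rate = 5, .missed_rate_counter = 0 };

	for (int i = 0; i < 20; i++)	/* stale statuses trigger one resync */
		tx_status_usable(&lq, 3, resync_stub);
	return 0;
}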
+ */ + table = &lq_sta->lq; + tx_rate = le32_to_cpu(table->rs_table[0]); + rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index); + if (info->band == IEEE80211_BAND_5GHZ) + rs_index -= IWL_FIRST_OFDM_RATE; + mac_flags = info->status.rates[0].flags; + mac_index = info->status.rates[0].idx; + /* For HT packets, map MCS to PLCP */ + if (mac_flags & IEEE80211_TX_RC_MCS) { + /* Remove # of streams */ + mac_index &= RATE_HT_MCS_RATE_CODE_MSK; + if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) + mac_index++; + /* + * mac80211 HT index is always zero-indexed; we need to move + * HT OFDM rates after CCK rates in 2.4 GHz band + */ + if (info->band == IEEE80211_BAND_2GHZ) + mac_index += IWL_FIRST_OFDM_RATE; + } + /* Here we actually compare this rate to the latest LQ command */ + if ((mac_index < 0) || + (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || + (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || + (tbl_type.ant_type != info->status.antenna) || + (!!(tx_rate & RATE_MCS_HT_MSK) != + !!(mac_flags & IEEE80211_TX_RC_MCS)) || + (!!(tx_rate & RATE_HT_MCS_GF_MSK) != + !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || + (rs_index != mac_index)) { + IWL_DEBUG_RATE(mvm, + "initial rate %d does not match %d (0x%x)\n", + mac_index, rs_index, tx_rate); + /* + * Since rates mis-match, the last LQ command may have failed. + * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. + */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + /* Figure out if rate scale algorithm is in active or search table */ + if (table_type_matches(&tbl_type, + &(lq_sta->lq_info[lq_sta->active_tbl]))) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else if (table_type_matches( + &tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } else { + IWL_DEBUG_RATE(mvm, + "Neither active nor search matches tx rate\n"); + tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, + tmp_tbl->is_SGI); + tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, + tmp_tbl->is_SGI); + IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n", + tbl_type.lq_type, tbl_type.ant_type, + tbl_type.is_SGI); + /* + * no matching table found, let's by-pass the data collection + * and continue to perform rate scale to find the rate table + */ + rs_stay_in_table(lq_sta, true); + goto done; + } + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first index into rate scale table. 
+ */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + tx_rate = le32_to_cpu(table->rs_table[0]); + rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, + &rs_index); + rs_collect_tx_data(curr_tbl, rs_index, + info->status.ampdu_len, + info->status.ampdu_ack_len); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += info->status.ampdu_ack_len; + lq_sta->total_failed += (info->status.ampdu_len - + info->status.ampdu_ack_len); + } + } else { + /* + * For legacy, update frame history with for each Tx retry. + */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + tx_rate = le32_to_cpu(table->rs_table[i]); + rs_get_tbl_info_from_mcs(tx_rate, info->band, + &tbl_type, &rs_index); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (table_type_matches(&tbl_type, curr_tbl)) + tmp_tbl = curr_tbl; + else if (table_type_matches(&tbl_type, other_tbl)) + tmp_tbl = other_tbl; + else + continue; + rs_collect_tx_data(tmp_tbl, rs_index, 1, + i < retries ? 0 : legacy_success); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = tx_rate; +done: + /* See if there's a better rate or modulation mode to try. */ + if (sta && sta->supp_rates[sband->band]) + rs_rate_scale_perform(mvm, skb, sta, lq_sta); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history statistics. + * These control how long we stay using same modulation mode before + * searching for a new mode. 
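The success/failure limits mentioned here differ between legacy and HT tables, as rs_set_stay_in_table() below shows. A simplified sketch of the trigger that re-enables searching, with placeholder names and numbers (the real function also honours a force_search flag and the flush timer state):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder limits; the driver picks between the IWL_LEGACY_* and
 * IWL_NONE_LEGACY_* constants depending on the modulation mode. */
struct stay_limits {
	int max_failure_limit;
	int max_success_limit;
};

struct stay_state {
	int total_failed;
	int total_success;
	bool flush_interval_passed;
};

/* A new "search" round is allowed once enough frames failed or succeeded,
 * or once the flush interval expired */
static bool allow_search(const struct stay_state *s,
			 const struct stay_limits *lim)
{
	return s->total_failed > lim->max_failure_limit ||
	       s->total_success > lim->max_success_limit ||
	       s->flush_interval_passed;
}

int main(void)
{
	struct stay_limits lim = { 160, 480 };		/* made-up numbers */
	struct stay_state st = { .total_failed = 200 };

	printf("search allowed: %d\n", allow_search(&st, &lim));
	return 0;
}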
+ */ +static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy, + struct iwl_lq_sta *lq_sta) +{ + IWL_DEBUG_RATE(mvm, "we are staying in the same table\n"); + lq_sta->stay_in_tbl = 1; /* only place this gets set */ + if (is_legacy) { + lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->action_counter = 0; +} + +/* + * Find correct throughput table for given mode of modulation + */ +static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + /* Used to choose among HT tables */ + s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; + + /* Check for invalid LQ type */ + if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Legacy rates have only one table */ + if (is_legacy(tbl->lq_type)) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation + * status */ + if (is_siso(tbl->lq_type) && !tbl->is_ht40) + ht_tbl_pointer = expected_tpt_siso20MHz; + else if (is_siso(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_siso40MHz; + else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40) + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + else if (is_mimo2(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40) + ht_tbl_pointer = expected_tpt_mimo3_20MHz; + else /* if (is_mimo3(tbl->lq_type)) <-- must be true */ + ht_tbl_pointer = expected_tpt_mimo3_40MHz; + + if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ + tbl->expected_tpt = ht_tbl_pointer[0]; + else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ + tbl->expected_tpt = ht_tbl_pointer[1]; + else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ + tbl->expected_tpt = ht_tbl_pointer[2]; + else /* AGG+SGI */ + tbl->expected_tpt = ht_tbl_pointer[3]; +} + +/* + * Find starting rate for new "search" high-throughput mode of modulation. + * Goal is to find lowest expected rate (under perfect conditions) that is + * above the current measured throughput of "active" mode, to give new mode + * a fair chance to prove itself without too many challenges. + * + * This gets called when transitioning to more aggressive modulation + * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive + * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need + * to decrease to match "active" throughput. When moving from MIMO to SISO, + * bit rate will typically need to increase, but not if performance was bad. 
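Setting aside the success-ratio conditions rs_get_best_rate() weighs below, the core idea (start the new mode at the lowest rate whose ideal throughput still beats what the active mode actually delivered) can be sketched as follows, with invented names and the SISO 20 MHz "Norm" row copied from the tables above:

#include <stdio.h>

#define RATE_COUNT 13
#define RATE_INVALID -1

/* Lowest rate index whose expected throughput exceeds the throughput
 * actually measured in the active mode. */
static int pick_start_rate(const int expected_tpt[RATE_COUNT],
			   int measured_active_tpt)
{
	for (int i = 0; i < RATE_COUNT; i++)
		if (expected_tpt[i] > measured_active_tpt)
			return i;
	return RATE_INVALID;
}

int main(void)
{
	static const int siso20[RATE_COUNT] = {
		0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202
	};

	printf("start index: %d\n", pick_start_rate(siso20, 100));	/* 7 */
	return 0;
}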
+ */ +static s32 rs_get_best_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, /* "search" */ + u16 rate_mask, s8 index) +{ + /* "active" values */ + struct iwl_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 active_sr = active_tbl->win[index].success_ratio; + s32 active_tpt = active_tbl->expected_tpt[index]; + + /* expected "search" throughput */ + s32 *tpt_tbl = tbl->expected_tpt; + + s32 new_rate, high, low, start_hi; + u16 high_low; + s8 rate = index; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + while (1) { + high_low = rs_get_adjacent_rate(mvm, rate, rate_mask, + tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* + * Lower the "search" bit rate, to give new "search" mode + * approximately the same throughput as "active" if: + * + * 1) "Active" mode has been working modestly well (but not + * great), and expected "search" throughput (under perfect + * conditions) at candidate rate is above the actual + * measured "active" throughput (but less than expected + * "active" throughput under perfect conditions). + * OR + * 2) "Active" mode has been working perfectly or very well + * and expected "search" throughput (under perfect + * conditions) at candidate rate is above expected + * "active" throughput (under perfect conditions). + */ + if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + /* (2nd or later pass) + * If we've already tried to raise the rate, and are + * now trying to lower it, use the higher rate. */ + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + + new_rate = rate; + + /* Loop again with lower rate */ + if (low != IWL_RATE_INVALID) + rate = low; + + /* Lower rate not available, use the original */ + else + break; + + /* Else try to raise the "search" rate to match "active" */ + } else { + /* (2nd or later pass) + * If we've already tried to lower the rate, and are + * now trying to raise it, use the lower rate. 
*/ + if (new_rate != IWL_RATE_INVALID) + break; + + /* Loop again with higher rate */ + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + + /* Higher rate not available, use the original */ + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} + +static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta) +{ + return sta->bandwidth >= IEEE80211_STA_RX_BW_40; +} + +/* + * Set up search table for MIMO2 + */ +static int rs_switch_to_mimo2(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + + if (!sta->ht_cap.ht_supported) + return -1; + + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 2) + return -1; + + IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n"); + + tbl->lq_type = LQ_MIMO2; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_mimo2_rate; + + if (iwl_is_ht40_tx_allowed(sta)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + rs_set_expected_tpt_table(lq_sta, tbl); + + rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n", + rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green); + + IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for MIMO3 + */ +static int rs_switch_to_mimo3(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + + if (!sta->ht_cap.ht_supported) + return -1; + + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 3) + return -1; + + IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n"); + + tbl->lq_type = LQ_MIMO3; + tbl->action = 0; + tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; + rate_mask = lq_sta->active_mimo3_rate; + + if (iwl_is_ht40_tx_allowed(sta)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + rs_set_expected_tpt_table(lq_sta, tbl); + + rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n", + rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green); + + IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for SISO + */ +static int rs_switch_to_siso(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + u8 is_green = lq_sta->is_green; + s32 rate; + + if (!sta->ht_cap.ht_supported) + return -1; + + IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n"); + + tbl->lq_type = LQ_SISO; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_siso_rate; + + if 
(iwl_is_ht40_tx_allowed(sta)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + if (is_green) + tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ + + rs_set_expected_tpt_table(lq_sta, tbl); + rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(mvm, + "can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green); + IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Try to switch to new modulation mode from legacy + */ +static int rs_move_legacy_other(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, + int index) +{ + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; + u8 tx_chains_num = num_of_ant(valid_tx_ant); + int ret; + u8 update_search_tbl_counter = 0; + + start_action = tbl->action; + while (1) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA1: + case IWL_LEGACY_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n"); + + if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + /* Don't change antenna if success has been great */ + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + /* Set up search table to try other antenna */ + memcpy(search_tbl, tbl, sz); + + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, + search_tbl)) { + update_search_tbl_counter = 1; + rs_set_expected_tpt_table(lq_sta, search_tbl); + goto out; + } + break; + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n"); + + /* Set up search table to try SISO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + ret = rs_switch_to_siso(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + + break; + case IWL_LEGACY_SWITCH_MIMO2_AB: + case IWL_LEGACY_SWITCH_MIMO2_AC: + case IWL_LEGACY_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n"); + + /* Set up search table to try MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + + case IWL_LEGACY_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n"); + + /* Set up search table to try MIMO3 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) { + 
lq_sta->action_counter = 0; + goto out; + } + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + return 0; +} + +/* + * Try to switch to new modulation mode from SISO + */ +static int rs_move_siso_to_other(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, int index) +{ + u8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; + u8 tx_chains_num = num_of_ant(valid_tx_ant); + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + while (1) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA1: + case IWL_SISO_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n"); + if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, + search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_SISO_SWITCH_MIMO2_AB: + case IWL_SISO_SWITCH_MIMO2_AC: + case IWL_SISO_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) + goto out; + break; + case IWL_SISO_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n"); + + memcpy(search_tbl, tbl, sz); + if (is_green) { + if (!tbl->is_SGI) + break; + else + IWL_ERR(mvm, + "SGI was set in GF+SISO\n"); + } + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(mvm, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + case IWL_SISO_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) + goto out; + break; + } + tbl->action++; + if 
(tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Try to switch to new modulation mode from MIMO2 + */ +static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; + u8 tx_chains_num = num_of_ant(valid_tx_ant); + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + while (1) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO2_SWITCH_ANTENNA1: + case IWL_MIMO2_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n"); + + if (tx_chains_num <= 2) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, + search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_MIMO2_SWITCH_SISO_A: + case IWL_MIMO2_SWITCH_SISO_B: + case IWL_MIMO2_SWITCH_SISO_C: + IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_siso(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO2_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO2 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! 
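Each rs_move_*() helper above cycles through its candidate actions the same way: try the current action, advance with wrap-around, and stop once it is back at the action it started from. A standalone sketch of that loop shape with invented action names:

#include <stdbool.h>
#include <stdio.h>

enum switch_action {
	SWITCH_ANTENNA,
	SWITCH_SISO,
	SWITCH_MIMO2,
	SWITCH_MIMO3,
	NUM_ACTIONS
};

/* Round-robin over the candidate actions, starting at start_action,
 * until one is viable or every candidate has been tried once. */
static int next_viable_action(int start_action,
			      bool (*try_action)(enum switch_action))
{
	int action = start_action;

	while (1) {
		if (try_action(action))
			return action;
		action = (action + 1) % NUM_ACTIONS;
		if (action == start_action)
			break;
	}
	return -1;	/* nothing viable; stay with the current mode */
}

static bool only_mimo2_works(enum switch_action a)
{
	return a == SWITCH_MIMO2;
}

int main(void)
{
	printf("next action: %d\n",
	       next_viable_action(SWITCH_ANTENNA, only_mimo2_works));
	return 0;
}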
+ */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(mvm, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + + case IWL_MIMO2_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + } + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Try to switch to new modulation mode from MIMO3 + */ +static int rs_move_mimo3_to_other(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; + u8 tx_chains_num = num_of_ant(valid_tx_ant); + int ret; + u8 update_search_tbl_counter = 0; + + start_action = tbl->action; + while (1) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO3_SWITCH_ANTENNA1: + case IWL_MIMO3_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n"); + + if (tx_chains_num <= 3) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, + search_tbl)) + goto out; + break; + case IWL_MIMO3_SWITCH_SISO_A: + case IWL_MIMO3_SWITCH_SISO_B: + case IWL_MIMO3_SWITCH_SISO_C: + IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO3_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_siso(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO3_SWITCH_MIMO2_AB: + case IWL_MIMO3_SWITCH_MIMO2_AC: + case IWL_MIMO3_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n"); + + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(mvm, lq_sta, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO3_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + 
IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(mvm, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + } + tbl->action++; + if (tbl->action > IWL_MIMO3_SWITCH_GI) + tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO3_SWITCH_GI) + tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + struct iwl_mvm *mvm; + + mvm = lq_sta->drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->stay_in_tbl) { + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + /* + * Check if we should allow search for new modulation mode. + * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. + */ + if (force_search || + (lq_sta->total_failed > lq_sta->max_failure_limit) || + (lq_sta->total_success > lq_sta->max_success_limit) || + ((!lq_sta->search_better_tbl) && + (lq_sta->flush_timer) && (flush_interval_passed))) { + IWL_DEBUG_RATE(mvm, + "LQ: stay is expired %d %d %d\n", + lq_sta->total_failed, + lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->stay_in_tbl = 0; /* only place reset */ + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. 
+ */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= + lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + IWL_DEBUG_RATE(mvm, + "LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (!lq_sta->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + } + } +} + +/* + * setup rate table in uCode + */ +static void rs_update_rate_tbl(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int index, u8 is_green) +{ + u32 rate; + + /* Update uCode's rate table. */ + rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green); + rs_fill_link_cmd(mvm, lq_sta, rate); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false); +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void rs_rate_scale_perform(struct iwl_mvm *mvm, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 rate_mask; + u8 update_lq = 0; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 tid = IWL_MAX_TID_COUNT; + struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv; + struct iwl_mvm_tid_data *tid_data; + + IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n"); + + /* Send management frames and NO_ACK data using lowest rate. */ + /* TODO: this could probably be improved.. */ + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + + tid = rs_tl_add_packet(lq_sta, hdr); + if ((tid != IWL_MAX_TID_COUNT) && + (lq_sta->tx_agg_tid_en & (1 << tid))) { + tid_data = &sta_priv->tid_data[tid]; + if (tid_data->state == IWL_AGG_OFF) + lq_sta->is_agg = 0; + else + lq_sta->is_agg = 1; + } else { + lq_sta->is_agg = 0; + } + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. 
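The "active"/"search" pair used throughout this function is a two-element array plus an index; the search table is always the other element, and a successful search simply swaps the roles. A minimal sketch with invented types:

#include <stdio.h>

struct mode_tbl {
	int lq_type;	/* stands in for the driver's per-table state */
};

struct lq_sta_sketch {
	struct mode_tbl lq_info[2];
	int active_tbl;		/* index of the "active" table */
	int search_better_tbl;	/* non-zero while evaluating the other one */
};

static struct mode_tbl *current_tbl(struct lq_sta_sketch *lq)
{
	/* "search" mode works on the table that is not active */
	int idx = lq->search_better_tbl ? 1 - lq->active_tbl : lq->active_tbl;

	return &lq->lq_info[idx];
}

static void search_succeeded(struct lq_sta_sketch *lq)
{
	/* swap roles: "search" becomes "active" */
	lq->active_tbl = 1 - lq->active_tbl;
	lq->search_better_tbl = 0;
}

int main(void)
{
	struct lq_sta_sketch lq = { { { 1 }, { 2 } }, 0, 1 };

	printf("working on table %d\n", (int)(current_tbl(&lq) - lq.lq_info));
	search_succeeded(&lq);
	printf("active is now %d\n", lq.active_tbl);
	return 0;
}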
+ */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + if (is_legacy(tbl->lq_type)) + lq_sta->is_green = 0; + else + lq_sta->is_green = rs_use_green(sta); + is_green = lq_sta->is_green; + + /* current tx rate */ + index = lq_sta->last_txrate_idx; + + IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index, + tbl->lq_type); + + /* rates available for this association, and for modulation mode */ + rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); + + IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + /* supp_rates has no CCK bits in A mode */ + rate_scale_index_msk = (u16) (rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_sta->supp_rates); + + } else { + rate_scale_index_msk = rate_mask; + } + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (!((1 << index) & rate_scale_index_msk)) { + IWL_ERR(mvm, "Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid*/ + tbl->lq_type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + /* get "active" rate info */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green); + } + return; + } + + /* Get expected throughput table and history window for current rate */ + if (!tbl->expected_tpt) { + IWL_ERR(mvm, "tbl->expected_tpt is NULL\n"); + return; + } + + /* force user max rate if set by user */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < index)) { + index = lq_sta->max_rate_idx; + update_lq = 1; + window = &(tbl->win[index]); + goto lq_update; + } + + window = &(tbl->win[index]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = window->counter - window->success_counter; + if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { + IWL_DEBUG_RATE(mvm, + "LQ: still below TH. succ=%d total=%d for index %d\n", + window->success_counter, window->counter, index); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? */ + rs_stay_in_table(lq_sta, false); + + goto out; + } + /* Else we have enough samples; calculate estimate of + * actual average throughput */ + if (window->average_tpt != ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128)) { + IWL_ERR(mvm, + "expected_tpt should have been calculated by now\n"); + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + } + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. 
*/ + if (window->average_tpt > lq_sta->last_tpt) { + IWL_DEBUG_RATE(mvm, + "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + if (!is_legacy(tbl->lq_type)) + lq_sta->enable_counter = 1; + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = window->average_tpt; + /* Else poor success; go back to mode in "active" table */ + } else { + IWL_DEBUG_RATE(mvm, + "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + tbl->lq_type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. */ + high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < high)) + high = IWL_RATE_INVALID; + + sr = window->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = window->average_tpt; + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + scale_action = 0; + + /* Too many failures, decrease rate */ + if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { + IWL_DEBUG_RATE(mvm, + "decrease rate because of low success_ratio\n"); + scale_action = -1; + /* No throughput measured yet for adjacent rates; try increase. */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + } + + /* Both adjacent throughputs are measured, but neither one has better + * throughput; we're using the best rate, don't change it! */ + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + + /* At least one adjacent rate's throughput is measured, + * and may have better performance. */ + else { + /* Higher adjacent rate's throughput is measured */ + if (high_tpt != IWL_INVALID_VALUE) { + /* Higher rate has better throughput */ + if (high_tpt > current_tpt && + sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } else { + scale_action = 0; + } + + /* Lower adjacent rate's throughput is measured */ + } else if (low_tpt != IWL_INVALID_VALUE) { + /* Lower rate has better throughput */ + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(mvm, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. 
Don't change it. */ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((sr > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + + switch (scale_action) { + case -1: + /* Decrease starting rate, update uCode's rate table */ + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + + break; + case 1: + /* Increase starting rate, update uCode's rate table */ + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + /* No change */ + default: + break; + } + + IWL_DEBUG_RATE(mvm, + "choose rate scale index %d action %d low %d high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) + rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green); + + rs_stay_in_table(lq_sta, false); + + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && + !lq_sta->stay_in_tbl && window->counter) { + /* Save current throughput to compare with "search" throughput*/ + lq_sta->last_tpt = current_tpt; + + /* Select a new "search" modulation mode to try. + * If one is found, set up the new "search" table. */ + if (is_legacy(tbl->lq_type)) + rs_move_legacy_other(mvm, lq_sta, sta, index); + else if (is_siso(tbl->lq_type)) + rs_move_siso_to_other(mvm, lq_sta, sta, index); + else if (is_mimo2(tbl->lq_type)) + rs_move_mimo2_to_other(mvm, lq_sta, sta, index); + else + rs_move_mimo3_to_other(mvm, lq_sta, sta, index); + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. */ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + + /* Use new "search" start rate */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + + IWL_DEBUG_RATE(mvm, + "Switch current mcs: %X index: %d\n", + tbl->current_rate, index); + rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false); + } else { + done_search = 1; + } + } + + if (done_search && !lq_sta->stay_in_tbl) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. */ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported && + lq_sta->action_counter > tbl1->max_search) { + IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); + rs_set_stay_in_table(mvm, 1, lq_sta); + } + + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. 
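
Pulling the decision rules above into one place: the driver steps down when the success ratio is at or below the 15% threshold (or no throughput was seen at all), steps up only when the higher neighbour looks better and the ratio clears 50%, and stays put when both neighbours measure worse than the current rate. The sketch below mirrors that flow with the rs.h threshold values; it deliberately omits the final sanity check that cancels a decrease when the ratio is above 85% or the current throughput already exceeds the lower rate's full expected throughput.

#include <stdio.h>

#define INVALID      -1
#define DECREASE_TH  1920	/* 15% on the percent*128 scale */
#define INCREASE_TH  6400	/* 50% */

/* returns -1 = step down, 0 = stay, +1 = step up */
static int scale_action(int sr, int cur_tpt, int low_tpt, int high_tpt,
			int high_valid)
{
	if (sr <= DECREASE_TH || cur_tpt == 0)
		return -1;

	if (low_tpt == INVALID && high_tpt == INVALID)
		return (high_valid && sr >= INCREASE_TH) ? 1 : 0;

	if (low_tpt != INVALID && high_tpt != INVALID &&
	    low_tpt < cur_tpt && high_tpt < cur_tpt)
		return 0;			/* already at the best rate */

	if (high_tpt != INVALID)
		return (high_tpt > cur_tpt && sr >= INCREASE_TH) ? 1 : 0;

	if (low_tpt > cur_tpt)
		return -1;			/* lower rate actually does better */

	return sr >= INCREASE_TH ? 1 : 0;
}

int main(void)
{
	printf("%d\n", scale_action(12000, 5400, INVALID, INVALID, 1));	/* 1 */
	printf("%d\n", scale_action(1500, 5400, 4000, 5000, 1));	/* -1 */
	return 0;
}
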
*/ + if (lq_sta->enable_counter && + (lq_sta->action_counter >= tbl1->max_search)) { + if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + (tid != IWL_MAX_TID_COUNT)) { + tid_data = &sta_priv->tid_data[tid]; + if (tid_data->state == IWL_AGG_OFF) { + IWL_DEBUG_RATE(mvm, + "try to aggregate tid %d\n", + tid); + rs_tl_turn_on_agg(mvm, tid, + lq_sta, sta); + } + } + rs_set_stay_in_table(mvm, 0, lq_sta); + } + } + +out: + tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green); + lq_sta->last_txrate_idx = index; +} + +/** + * rs_initialize_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). + */ +static void rs_initialize_lq(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + enum ieee80211_band band) +{ + struct iwl_scale_tbl_info *tbl; + int rate_idx; + int i; + u32 rate; + u8 use_green = rs_use_green(sta); + u8 active_tbl = 0; + u8 valid_tx_ant; + + if (!sta || !lq_sta) + return; + + i = lq_sta->last_txrate_idx; + + valid_tx_ant = mvm->nvm_data->valid_tx_ant; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + rate = iwl_rates[i].plcp; + tbl->ant_type = first_antenna(valid_tx_ant); + rate |= tbl->ant_type << RATE_MCS_ANT_POS; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + rate |= RATE_MCS_CCK_MSK; + + rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx); + if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) + rs_toggle_antenna(valid_tx_ant, &rate, tbl); + + rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green); + tbl->current_rate = rate; + rs_set_expected_tpt_table(lq_sta, tbl); + rs_fill_link_cmd(NULL, lq_sta, rate); + /* TODO restore station should remember the lq cmd */ + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true); +} + +static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct iwl_op_mode *op_mode __maybe_unused = + (struct iwl_op_mode *)mvm_r; + struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_lq_sta *lq_sta = mvm_sta; + int rate_idx; + + IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n"); + + /* Get max rate if user set max rate */ + if (lq_sta) { + lq_sta->max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && + (lq_sta->max_rate_idx != -1)) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((lq_sta->max_rate_idx < 0) || + (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) + lq_sta->max_rate_idx = -1; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (lq_sta && !lq_sta->drv) { + IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); + mvm_sta = NULL; + } + + /* Send management frames and NO_ACK data using lowest rate. 
*/ + if (rate_control_send_low(sta, mvm_sta, txrc)) + return; + + rate_idx = lq_sta->last_txrate_idx; + + if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { + rate_idx -= IWL_FIRST_OFDM_RATE; + /* 6M and 9M shared same MCS index */ + rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0; + if (rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO3_6M_PLCP) + rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM); + else if (rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO2_6M_PLCP) + rate_idx = rate_idx + MCS_INDEX_PER_STREAM; + info->control.rates[0].flags = IEEE80211_TX_RC_MCS; + if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI; + if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */ + info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD; + } else { + /* Check for invalid rates */ + if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || + ((sband->band == IEEE80211_BAND_5GHZ) && + (rate_idx < IWL_FIRST_OFDM_RATE))) + rate_idx = rate_lowest_index(sband, sta); + /* On valid 5 GHz rate, adjust index */ + else if (sband->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + info->control.rates[0].flags = 0; + } + info->control.rates[0].idx = rate_idx; +} + +static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct iwl_mvm_sta *sta_priv = (struct iwl_mvm_sta *)sta->drv_priv; + struct iwl_op_mode *op_mode __maybe_unused = + (struct iwl_op_mode *)mvm_rate; + struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); + + IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); + + return &sta_priv->lq_sta; +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + enum ieee80211_band band) +{ + int i, j; + struct ieee80211_hw *hw = mvm->hw; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct iwl_mvm_sta *sta_priv; + struct iwl_lq_sta *lq_sta; + struct ieee80211_supported_band *sband; + unsigned long supp; /* must be unsigned long for for_each_set_bit */ + + sta_priv = (struct iwl_mvm_sta *)sta->drv_priv; + lq_sta = &sta_priv->lq_sta; + sband = hw->wiphy->bands[band]; + + lq_sta->lq.sta_id = sta_priv->sta_id; + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); + + lq_sta->flush_timer = 0; + lq_sta->supp_rates = sta->supp_rates[sband->band]; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); + + IWL_DEBUG_RATE(mvm, + "LQ: *** rate scale station global init for station %d ***\n", + sta_priv->sta_id); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
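
The HT branch above converts last_rate_n_flags back into a mac80211 MCS index: take the OFDM-relative index, merge 6M and 9M onto MCS 0, then add one block of MCS indices per extra spatial stream. A standalone version of that mapping, assuming the usual eight MCS values per stream for MCS_INDEX_PER_STREAM:

#include <stdio.h>

#define MCS_PER_STREAM 8	/* 802.11n defines 8 MCS values per spatial stream */

/*
 * ofdm_idx is already relative to the first OFDM rate (0 = 6M ... 7 = 54M).
 * 6M and 9M share MCS 0, so anything above 0 shifts down by one; each extra
 * stream then adds a block of MCS_PER_STREAM.
 */
static int ht_mcs_index(int ofdm_idx, int nss)
{
	int mcs = ofdm_idx > 0 ? ofdm_idx - 1 : 0;

	return mcs + (nss - 1) * MCS_PER_STREAM;
}

int main(void)
{
	printf("24M SISO  -> MCS %d\n", ht_mcs_index(4, 1));	/* 3 */
	printf("24M MIMO2 -> MCS %d\n", ht_mcs_index(4, 2));	/* 11 */
	printf("24M MIMO3 -> MCS %d\n", ht_mcs_index(4, 3));	/* 19 */
	return 0;
}
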
*/ + + lq_sta->max_rate_idx = -1; + lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; + lq_sta->is_green = rs_use_green(sta); + lq_sta->band = sband->band; + /* + * active legacy rates as per supported rates bitmap + */ + supp = sta->supp_rates[sband->band]; + lq_sta->active_legacy_rate = 0; + for_each_set_bit(i, &supp, BITS_PER_LONG) + lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); + + /* + * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), + * supp_rates[] does not; shift to convert format, force 9 MBits off. + */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16)0x2); + lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; + + /* Same here */ + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16)0x2); + lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; + + lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1; + lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1; + lq_sta->active_mimo3_rate &= ~((u16)0x2); + lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE; + + IWL_DEBUG_RATE(mvm, + "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n", + lq_sta->active_siso_rate, + lq_sta->active_mimo2_rate, + lq_sta->active_mimo3_rate); + + /* These values will be overridden later */ + lq_sta->lq.single_stream_ant_msk = + first_antenna(mvm->nvm_data->valid_tx_ant); + lq_sta->lq.dual_stream_ant_msk = + mvm->nvm_data->valid_tx_ant & + ~first_antenna(mvm->nvm_data->valid_tx_ant); + if (!lq_sta->lq.dual_stream_ant_msk) { + lq_sta->lq.dual_stream_ant_msk = ANT_AB; + } else if (num_of_ant(mvm->nvm_data->valid_tx_ant) == 2) { + lq_sta->lq.dual_stream_ant_msk = + mvm->nvm_data->valid_tx_ant; + } + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; + lq_sta->drv = mvm; + + /* Set last_txrate_idx to lowest rate */ + lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); + if (sband->band == IEEE80211_BAND_5GHZ) + lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + lq_sta->is_agg = 0; +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->dbg_fixed_rate = 0; +#endif + + rs_initialize_lq(mvm, sta, lq_sta, band); +} + +static void rs_fill_link_cmd(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, u32 new_rate) +{ + struct iwl_scale_tbl_info tbl_type; + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_cnt = 0; + u8 use_ht_possible = 1; + u8 valid_tx_ant = 0; + struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; + + /* Override starting rate (index 0) if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Interpret new_rate (rate_n_flags) */ + rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, + &tbl_type, &rate_idx); + + /* How many times should we repeat the initial rate? */ + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_cnt = 1; + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = min(IWL_HT_NUMBER_TRY, + LINK_QUAL_AGG_DISABLE_START_DEF - 1); + } + + lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 
1 : 0; + + /* Fill 1st table entry (index 0) */ + lq_cmd->rs_table[index] = cpu_to_le32(new_rate); + + if (num_of_ant(tbl_type.ant_type) == 1) + lq_cmd->single_stream_ant_msk = tbl_type.ant_type; + else if (num_of_ant(tbl_type.ant_type) == 2) + lq_cmd->dual_stream_ant_msk = tbl_type.ant_type; + /* otherwise we don't modify the existing value */ + + index++; + repeat_rate--; + if (mvm) + valid_tx_ant = mvm->nvm_data->valid_tx_ant; + + /* Fill rest of rate table */ + while (index < LINK_QUAL_MAX_RETRY_NUM) { + /* Repeat initial/next rate. + * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. + * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (mvm && + rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + } + + /* Override next rate if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index] = + cpu_to_le32(new_rate); + repeat_rate--; + index++; + } + + rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, + &rate_idx); + + + /* Indicate to uCode which entries might be MIMO. + * If initial rate was MIMO, this will finally end up + * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ + if (is_mimo(tbl_type.lq_type)) + lq_cmd->mimo_delim = index; + + /* Get next rate */ + new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, + use_ht_possible); + + /* How many times should we repeat the next rate? */ + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (mvm && + rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + /* Don't allow HT rates after next pass. + * rs_get_lower_rate() will change type to LQ_A or LQ_G. 
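
rs_fill_link_cmd() above builds the 16-entry fallback table the same basic way each time: repeat the current rate for its try count (1 for legacy, 3 for HT, per the in-code comment), then drop to the next lower rate and repeat. The toy fill below shows only that repeat-then-step pattern; it leaves out the antenna toggling, the MIMO delimiter and the HT cut-off that the real loop also handles, and uses plain integers instead of rate_n_flags words.

#include <stdio.h>

#define MAX_RETRY 16	/* LINK_QUAL_MAX_RETRY_NUM */

static void fill_table(int table[MAX_RETRY], int start_rate, int tries)
{
	int rate = start_rate, idx = 0, left = tries;

	while (idx < MAX_RETRY) {
		table[idx++] = rate;
		if (--left == 0 && rate > 0) {
			rate--;		/* stand-in for rs_get_lower_rate() */
			left = tries;
		}
	}
}

int main(void)
{
	int t[MAX_RETRY];
	int i;

	fill_table(t, 7, 3);		/* HT-style: 3 tries per rate */
	for (i = 0; i < MAX_RETRY; i++)
		printf("%d ", t[i]);	/* 7 7 7 6 6 6 5 5 5 ... */
	printf("\n");
	return 0;
}
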
*/ + use_ht_possible = 0; + + /* Override next rate if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index] = cpu_to_le32(new_rate); + + index++; + repeat_rate--; + } + + lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + + lq_cmd->agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); +} + +static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} +/* rate scale requires free function to be implemented */ +static void rs_free(void *mvm_rate) +{ + return; +} + +static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, + void *mvm_sta) +{ + struct iwl_op_mode *op_mode __maybe_unused = mvm_r; + struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); + + IWL_DEBUG_RATE(mvm, "enter\n"); + IWL_DEBUG_RATE(mvm, "leave\n"); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{ + struct iwl_mvm *mvm; + u8 valid_tx_ant; + u8 ant_sel_tx; + + mvm = lq_sta->drv; + valid_tx_ant = mvm->nvm_data->valid_tx_ant; + if (lq_sta->dbg_fixed_rate) { + ant_sel_tx = + ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) + >> RATE_MCS_ANT_POS); + if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { + *rate_n_flags = lq_sta->dbg_fixed_rate; + IWL_DEBUG_RATE(mvm, "Fixed rate ON\n"); + } else { + lq_sta->dbg_fixed_rate = 0; + IWL_ERR(mvm, + "Invalid antenna selection 0x%X, Valid is 0x%X\n", + ant_sel_tx, valid_tx_ant); + IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n"); + } + } else { + IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n"); + } +} + +static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_mvm *mvm; + char buf[64]; + size_t buf_size; + u32 parsed_rate; + + + mvm = lq_sta->drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->dbg_fixed_rate = parsed_rate; + else + lq_sta->dbg_fixed_rate = 0; + + rs_program_fix_rate(mvm, lq_sta); + + return count; +} + +static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + int index = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_mvm *mvm; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + + mvm = lq_sta->drv; + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + lq_sta->dbg_fixed_rate); + desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + (mvm->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "", + (mvm->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "", + (mvm->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : ""); + desc += sprintf(buff+desc, "lq type %s\n", + (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); + if (is_Ht(tbl->lq_type)) { + desc += sprintf(buff+desc, " %s", + (is_siso(tbl->lq_type)) ? "SISO" : + ((is_mimo2(tbl->lq_type)) ? 
"MIMO2" : "MIMO3")); + desc += sprintf(buff+desc, " %s", + (tbl->is_ht40) ? "40MHz" : "20MHz"); + desc += sprintf(buff+desc, " %s %s %s\n", + (tbl->is_SGI) ? "SGI" : "", + (lq_sta->is_green) ? "GF enabled" : "", + (lq_sta->is_agg) ? "AGG on" : ""); + } + desc += sprintf(buff+desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += sprintf(buff+desc, + "general: flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + lq_sta->lq.flags, + lq_sta->lq.mimo_delim, + lq_sta->lq.single_stream_ant_msk, + lq_sta->lq.dual_stream_ant_msk); + + desc += sprintf(buff+desc, + "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_time_limit), + lq_sta->lq.agg_disable_start_th, + lq_sta->lq.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.initial_rate_index[0], + lq_sta->lq.initial_rate_index[1], + lq_sta->lq.initial_rate_index[2], + lq_sta->lq.initial_rate_index[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + index = iwl_hwrate_to_plcp_idx( + le32_to_cpu(lq_sta->lq.rs_table[i])); + if (is_legacy(tbl->lq_type)) { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", + i, le32_to_cpu(lq_sta->lq.rs_table[i]), + iwl_rate_mcs[index].mbps); + } else { + desc += sprintf(buff+desc, + " rate[%d] 0x%X %smbps (%s)\n", + i, le32_to_cpu(lq_sta->lq.rs_table[i]), + iwl_rate_mcs[index].mbps, + iwl_rate_mcs[index].mcs); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = rs_sta_dbgfs_scale_table_write, + .read = rs_sta_dbgfs_scale_table_read, + .open = simple_open, + .llseek = default_llseek, +}; +static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, + "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n" + "rate=0x%X\n", + lq_sta->active_tbl == i ? 
"*" : "x", + lq_sta->lq_info[i].lq_type, + lq_sta->lq_info[i].is_SGI, + lq_sta->lq_info[i].is_ht40, + lq_sta->is_green, + lq_sta->lq_info[i].current_rate); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->lq_info[i].win[j].counter, + lq_sta->lq_info[i].win[j].success_counter, + lq_sta->lq_info[i].win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = rs_sta_dbgfs_stats_table_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; + char buff[120]; + int desc = 0; + + if (is_Ht(tbl->lq_type)) + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + tbl->expected_tpt[lq_sta->last_txrate_idx]); + else + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); + + return simple_read_from_buffer(user_buf, count, ppos, buff, desc); +} + +static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { + .read = rs_sta_dbgfs_rate_scale_data_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) +{ + struct iwl_lq_sta *lq_sta = mvm_sta; + lq_sta->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_rate_scale_data_file = + debugfs_create_file("rate_scale_data", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); + lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); +} + +static void rs_remove_debugfs(void *mvm, void *mvm_sta) +{ + struct iwl_lq_sta *lq_sta = mvm_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. 
+ */ +static void rs_rate_init_stub(void *mvm_r, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *mvm_sta) +{ +} +static struct rate_control_ops rs_mvm_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init_stub, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = rs_add_debugfs, + .remove_sta_debugfs = rs_remove_debugfs, +#endif +}; + +int iwl_mvm_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_mvm_ops); +} + +void iwl_mvm_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_mvm_ops); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h new file mode 100644 index 000000000000..219c6857cc0f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -0,0 +1,393 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __rs_h__ +#define __rs_h__ + +#include <net/mac80211.h> + +#include "iwl-config.h" + +#include "fw-api.h" +#include "iwl-trans.h" + +struct iwl_rs_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +#define IWL_RATE_60M_PLCP 3 + +enum { + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, +}; + +#define LINK_QUAL_MAX_RETRY_NUM 16 + +enum { + IWL_RATE_6M_INDEX_TABLE = 0, + IWL_RATE_9M_INDEX_TABLE, + IWL_RATE_12M_INDEX_TABLE, + IWL_RATE_18M_INDEX_TABLE, + IWL_RATE_24M_INDEX_TABLE, + IWL_RATE_36M_INDEX_TABLE, + IWL_RATE_48M_INDEX_TABLE, + IWL_RATE_54M_INDEX_TABLE, + IWL_RATE_1M_INDEX_TABLE, + IWL_RATE_2M_INDEX_TABLE, + IWL_RATE_5M_INDEX_TABLE, + IWL_RATE_11M_INDEX_TABLE, + IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, +}; + +/* #define vs. 
enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) + + +/* uCode API values for OFDM high-throughput (HT) bit rates */ +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO2_6M_PLCP = 0x8, + IWL_RATE_MIMO2_12M_PLCP = 0x9, + IWL_RATE_MIMO2_18M_PLCP = 0xa, + IWL_RATE_MIMO2_24M_PLCP = 0xb, + IWL_RATE_MIMO2_36M_PLCP = 0xc, + IWL_RATE_MIMO2_48M_PLCP = 0xd, + IWL_RATE_MIMO2_54M_PLCP = 0xe, + IWL_RATE_MIMO2_60M_PLCP = 0xf, + IWL_RATE_MIMO3_6M_PLCP = 0x10, + IWL_RATE_MIMO3_12M_PLCP = 0x11, + IWL_RATE_MIMO3_18M_PLCP = 0x12, + IWL_RATE_MIMO3_24M_PLCP = 0x13, + IWL_RATE_MIMO3_36M_PLCP = 0x14, + IWL_RATE_MIMO3_48M_PLCP = 0x15, + IWL_RATE_MIMO3_54M_PLCP = 0x16, + IWL_RATE_MIMO3_60M_PLCP = 0x17, + IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, +}; + +/* MAC header values for bit rates */ +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +/* These values specify how many Tx frame attempts before + * searching for a new modulation mode */ +#define IWL_LEGACY_FAILURE_LIMIT 160 +#define IWL_LEGACY_SUCCESS_LIMIT 480 +#define IWL_LEGACY_TABLE_COUNT 160 + +#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_NONE_LEGACY_TABLE_COUNT 1500 + +/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ +#define IWL_RS_GOOD_RATIO 12800 /* 100% */ +#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */ +#define IWL_RATE_HIGH_TH 10880 /* 85% */ +#define IWL_RATE_INCREASE_TH 6400 /* 50% */ +#define IWL_RATE_DECREASE_TH 1920 /* 15% */ + +/* possible actions when in legacy mode */ +#define IWL_LEGACY_SWITCH_ANTENNA1 0 +#define IWL_LEGACY_SWITCH_ANTENNA2 1 +#define IWL_LEGACY_SWITCH_SISO 2 +#define IWL_LEGACY_SWITCH_MIMO2_AB 3 +#define IWL_LEGACY_SWITCH_MIMO2_AC 4 +#define IWL_LEGACY_SWITCH_MIMO2_BC 5 +#define IWL_LEGACY_SWITCH_MIMO3_ABC 6 + +/* possible actions when in siso mode */ +#define IWL_SISO_SWITCH_ANTENNA1 0 +#define IWL_SISO_SWITCH_ANTENNA2 1 +#define IWL_SISO_SWITCH_MIMO2_AB 2 +#define IWL_SISO_SWITCH_MIMO2_AC 3 +#define IWL_SISO_SWITCH_MIMO2_BC 4 +#define IWL_SISO_SWITCH_GI 5 +#define IWL_SISO_SWITCH_MIMO3_ABC 6 + + +/* possible actions when in mimo mode */ 
+#define IWL_MIMO2_SWITCH_ANTENNA1 0 +#define IWL_MIMO2_SWITCH_ANTENNA2 1 +#define IWL_MIMO2_SWITCH_SISO_A 2 +#define IWL_MIMO2_SWITCH_SISO_B 3 +#define IWL_MIMO2_SWITCH_SISO_C 4 +#define IWL_MIMO2_SWITCH_GI 5 +#define IWL_MIMO2_SWITCH_MIMO3_ABC 6 + + +/* possible actions when in mimo3 mode */ +#define IWL_MIMO3_SWITCH_ANTENNA1 0 +#define IWL_MIMO3_SWITCH_ANTENNA2 1 +#define IWL_MIMO3_SWITCH_SISO_A 2 +#define IWL_MIMO3_SWITCH_SISO_B 3 +#define IWL_MIMO3_SWITCH_SISO_C 4 +#define IWL_MIMO3_SWITCH_MIMO2_AB 5 +#define IWL_MIMO3_SWITCH_MIMO2_AC 6 +#define IWL_MIMO3_SWITCH_MIMO2_BC 7 +#define IWL_MIMO3_SWITCH_GI 8 + + +#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI +#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC + +/*FIXME:RS:add possible actions for MIMO3*/ + +#define IWL_ACTION_LIMIT 3 /* # possible actions */ + +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ +#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) +#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) + +#define LINK_QUAL_AGG_DISABLE_START_DEF (3) +#define LINK_QUAL_AGG_DISABLE_START_MAX (255) +#define LINK_QUAL_AGG_DISABLE_START_MIN (0) + +#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IWL_AGG_TPT_THREHOLD 0 +#define IWL_AGG_LOAD_THRESHOLD 10 +#define IWL_AGG_ALL_TID 0xff +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) + +enum iwl_table_type { + LQ_NONE, + LQ_G, /* legacy types */ + LQ_A, + LQ_SISO, /* high-throughput types */ + LQ_MIMO2, + LQ_MIMO3, + LQ_MAX, +}; + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) ((tbl) == LQ_SISO) +#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) +#define is_mimo3(tbl) ((tbl) == LQ_MIMO3) +#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) ((tbl) == LQ_A) +#define is_g_and(tbl) ((tbl) == LQ_G) + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct iwl_rate_scale_data -- tx success history for one rate + */ +struct iwl_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ + unsigned long stamp; +}; + +/** + * struct iwl_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct iwl_lq_sta, + * one for "active", and one for "search". + */ +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + u8 ant_type; + u8 is_SGI; /* 1 = short guard interval */ + u8 is_ht40; /* 1 = 40 MHz channel width */ + u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ + u8 max_search; /* maximun number of tables we can search */ + s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. 
*/ + u32 current_rate; /* rate_n_flags, uCode API format */ + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ +}; + +struct iwl_traffic_load { + unsigned long time_stamp; /* age of the oldest statistics */ + u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time + * slice */ + u32 total; /* total num of packets during the + * last TID_MAX_TIME_DIFF */ + u8 queue_count; /* number of queues that has + * been used since the last cleanup */ + u8 head; /* start of the circular buffer */ +}; + +/** + * struct iwl_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211. + */ +struct iwl_lq_sta { + u8 active_tbl; /* index of active table, range 0-1 */ + u8 enable_counter; /* indicates HT mode */ + u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u8 action_counter; /* # mode-switch actions tried */ + u8 is_green; + enum ieee80211_band band; + + /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + u16 active_mimo3_rate; + s8 max_rate_idx; /* Max rate set by user */ + u8 missed_rate_counter; + + struct iwl_lq_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; + u8 tx_agg_tid_en; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct dentry *rs_sta_dbgfs_rate_scale_data_file; + struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; + u32 dbg_fixed_rate; +#endif + struct iwl_mvm *drv; + + /* used to be in sta_info */ + int last_txrate_idx; + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; + /* BT traffic this sta was last updated in */ + u8 last_bt_traffic; +}; + +static inline u8 num_of_ant(u8 mask) +{ + return !!((mask) & ANT_A) + + !!((mask) & ANT_B) + + !!((mask) & ANT_C); +} + +/* Initialize station's rate scaling information after adding station */ +extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum ieee80211_band band); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +extern int iwl_mvm_rate_control_register(void); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. 
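
num_of_ant() and the antenna-mask juggling in rs_rate_init() are easier to see with concrete bits. The values below assume the usual one-bit-per-chain encoding for ANT_A/B/C (the real definitions live elsewhere in the driver), and first_antenna() is approximated as the lowest set bit:

#include <stdio.h>

/* assumed one-bit-per-chain encoding; the real values come from iwlwifi headers */
#define ANT_A 0x1
#define ANT_B 0x2
#define ANT_C 0x4

static int num_of_ant(unsigned mask)
{
	return !!(mask & ANT_A) + !!(mask & ANT_B) + !!(mask & ANT_C);
}

/* lowest set bit, a plausible stand-in for first_antenna() */
static unsigned first_antenna(unsigned mask)
{
	return mask & -mask;
}

int main(void)
{
	unsigned valid = ANT_A | ANT_B;	/* e.g. a two-chain device */
	unsigned single = first_antenna(valid);
	unsigned dual = valid & ~single;	/* remaining chains; rs_rate_init() adjusts this further */

	printf("chains=%d single=0x%x dual=0x%x\n",
	       num_of_ant(valid), single, dual);
	return 0;
}
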
+ */ +extern void iwl_mvm_rate_control_unregister(void); + +#endif /* __rs__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c new file mode 100644 index 000000000000..3f40ab05bbd8 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -0,0 +1,356 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#include "iwl-trans.h" + +#include "mvm.h" +#include "fw-api.h" + +/* + * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler + * + * Copies the phy information in mvm->last_phy_info, it will be used when the + * actual data will come from the fw in the next packet. 
+ */ +int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info)); + mvm->ampdu_ref++; + return 0; +} + +/* + * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211 + * + * Adds the rxb to a new skb and give it to mac80211 + */ +static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct ieee80211_hdr *hdr, u16 len, + u32 ampdu_status, + struct iwl_rx_cmd_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + unsigned int hdrlen, fraglen; + + /* Dont use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. + */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return; + } + /* If frame is small enough to fit in skb->head, pull it completely. + * If not, only pull ieee80211_hdr so that splice() or TCP coalesce + * are more efficient. + */ + hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr); + + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); + fraglen = len - hdrlen; + + if (fraglen) { + int offset = (void *)hdr + hdrlen - + rxb_addr(rxb) + rxb_offset(rxb); + + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, + fraglen, rxb->truesize); + } + + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx_ni(mvm->hw, skb); +} + +/* + * iwl_mvm_calc_rssi - calculate the rssi in dBm + * @phy_info: the phy information for the coming packet + */ +static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, + struct iwl_rx_phy_info *phy_info) +{ + u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db; + u32 val; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the Digital Signal Processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's Automatic Gain Control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. + */ + val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); + rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; + rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; + val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]); + rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS; + + val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); + agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS; + + max_rssi = max_t(u32, rssi_a, rssi_b); + max_rssi = max_t(u32, max_rssi, rssi_c); + + IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + rssi_a, rssi_b, rssi_c, max_rssi, agc_db); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
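
The dBm conversion above boils down to: take the strongest of the three per-chain DSP readings, subtract the AGC gain, subtract a fixed offset. A self-contained version, with RSSI_OFFSET used only as a placeholder since the value of IWL_RSSI_OFFSET is defined elsewhere in the driver:

#include <stdio.h>

/* placeholder only; the real IWL_RSSI_OFFSET comes from the driver headers */
#define RSSI_OFFSET 50

static int max3(int a, int b, int c)
{
	int m = a > b ? a : b;

	return m > c ? m : c;
}

/* dBm = strongest per-chain DSP rssi - AGC gain (dB) - fixed offset */
static int calc_rssi_dbm(int rssi_a, int rssi_b, int rssi_c, int agc_db)
{
	return max3(rssi_a, rssi_b, rssi_c) - agc_db - RSSI_OFFSET;
}

int main(void)
{
	/* made-up chain readings and AGC value */
	printf("signal = %d dBm\n", calc_rssi_dbm(97, 94, 0, 100));
	return 0;
}
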
*/ + return max_rssi - agc_db - IWL_RSSI_OFFSET; +} + +/* + * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format + * @mvm: the mvm object + * @hdr: 80211 header + * @stats: status in mac80211's format + * @rx_pkt_status: status coming from fw + * + * returns non 0 value if the packet should be dropped + */ +static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, + struct ieee80211_hdr *hdr, + struct ieee80211_rx_status *stats, + u32 rx_pkt_status) +{ + if (!ieee80211_has_protected(hdr->frame_control) || + (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == + RX_MPDU_RES_STATUS_SEC_NO_ENC) + return 0; + + /* packet was encrypted with unknown alg */ + if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == + RX_MPDU_RES_STATUS_SEC_ENC_ERR) + return 0; + + switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) { + case RX_MPDU_RES_STATUS_SEC_CCM_ENC: + /* alg is CCM: check MIC only */ + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) + return -1; + + stats->flag |= RX_FLAG_DECRYPTED; + IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n"); + return 0; + + case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: + /* Don't drop the frame and decrypt it in SW */ + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) + return 0; + /* fall through if TTAK OK */ + + case RX_MPDU_RES_STATUS_SEC_WEP_ENC: + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) + return -1; + + stats->flag |= RX_FLAG_DECRYPTED; + return 0; + + default: + IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); + } + + return 0; +} + +/* + * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler + * + * Handles the actual data of the Rx packet from the fw + */ +int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct ieee80211_hdr *hdr; + struct ieee80211_rx_status rx_status = {}; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_info *phy_info; + struct iwl_rx_mpdu_res_start *rx_res; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + u32 rx_pkt_status; + + phy_info = &mvm->last_phy_info; + rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; + hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); + len = le16_to_cpu(rx_res->byte_count); + rx_pkt_status = le32_to_cpup((__le32 *) + (pkt->data + sizeof(*rx_res) + len)); + + memset(&rx_status, 0, sizeof(rx_status)); + + /* + * drop the packet if it has failed being decrypted by HW + */ + if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) { + IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", + rx_pkt_status); + return 0; + } + + if ((unlikely(phy_info->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n", + phy_info->cfg_phy_cnt); + return 0; + } + + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || + !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { + IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); + return 0; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_info->timestamp); + rx_status.device_timestamp = le32_to_cpu(phy_info->system_timestamp); + rx_status.band = + (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), + rx_status.band); + /* + * TSF as indicated by the fw is at INA time, but mac80211 expects the + * TSF at the beginning of the MPDU. + */ + /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info); + + IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, + (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. + * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. + */ + rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) & + RX_RES_PHY_FLAGS_ANTENNA) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) + rx_status.flag |= RX_FLAG_SHORTPRE; + + if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { + /* + * We know which subframes of an A-MPDU belong + * together since we get a single PHY response + * from the firmware for all of them + */ + rx_status.flag |= RX_FLAG_AMPDU_DETAILS; + rx_status.ampdu_reference = mvm->ampdu_ref; + } + + /* Set up the HT phy flags */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status.flag |= RX_FLAG_40MHZ; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status.flag |= RX_FLAG_80MHZ; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status.flag |= RX_FLAG_160MHZ; + break; + } + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + if (rate_n_flags & RATE_HT_MCS_GF_MSK) + rx_status.flag |= RX_FLAG_HT_GF; + if (rate_n_flags & RATE_MCS_HT_MSK) { + rx_status.flag |= RX_FLAG_HT; + rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + rx_status.vht_nss = + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status.flag |= RX_FLAG_VHT; + } else { + rx_status.rate_idx = + iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status.band); + } + + iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status, + rxb, &rx_status); + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c new file mode 100644 index 000000000000..9b21b92aa8d1 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -0,0 +1,442 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include <linux/etherdevice.h> +#include <net/mac80211.h> + +#include "mvm.h" +#include "iwl-eeprom-parse.h" +#include "fw-api-scan.h" + +#define IWL_PLCP_QUIET_THRESH 1 +#define IWL_ACTIVE_QUIET_TIME 10 + +static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) +{ + u16 rx_chain; + u8 rx_ant = mvm->nvm_data->valid_rx_ant; + + rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS; + return cpu_to_le16(rx_chain); +} + +static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif) +{ + if (vif->bss_conf.assoc) + return cpu_to_le32(200 * 1024); + else + return 0; +} + +static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif) +{ + if (vif->bss_conf.assoc) + return cpu_to_le32(vif->bss_conf.beacon_int); + else + return 0; +} + +static inline __le32 +iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req) +{ + if (req->channels[0]->band == IEEE80211_BAND_2GHZ) + return cpu_to_le32(PHY_BAND_24); + else + return cpu_to_le32(PHY_BAND_5); +} + +static inline __le32 +iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, + bool no_cck) +{ + u32 tx_ant; + + mvm->scan_last_antenna_idx = + iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant, + mvm->scan_last_antenna_idx); + tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; + + if (band == IEEE80211_BAND_2GHZ && !no_cck) + return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | + tx_ant); + else + return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); +} + +/* + * We insert the SSIDs in an inverted order, because the FW will + * invert it back. The most prioritized SSID, which is first in the + * request list, is not copied here, but inserted directly to the probe + * request. + */ +static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd, + struct cfg80211_scan_request *req) +{ + int fw_idx, req_idx; + + fw_idx = 0; + for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) { + cmd->direct_scan[fw_idx].id = WLAN_EID_SSID; + cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len; + memcpy(cmd->direct_scan[fw_idx].ssid, + req->ssids[req_idx].ssid, + req->ssids[req_idx].ssid_len); + } +} + +/* + * If req->n_ssids > 0, it means we should do an active scan. + * In case of active scan w/o directed scan, we receive a zero-length SSID + * just to notify that this scan is active and not passive. + * In order to notify the FW of the number of SSIDs we wish to scan (including + * the zero-length one), we need to set the corresponding bits in chan->type, + * one for each SSID, and set the active bit (first). + */ +static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) +{ + if (band == IEEE80211_BAND_2GHZ) + return 30 + 3 * (n_ssids + 1); + return 20 + 2 * (n_ssids + 1); +} + +static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) +{ + return band == IEEE80211_BAND_2GHZ ? 
100 + 20 : 100 + 10; +} + +static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd, + struct cfg80211_scan_request *req) +{ + u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band); + u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band, + req->n_ssids); + struct iwl_scan_channel *chan = (struct iwl_scan_channel *) + (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); + int i; + __le32 chan_type_value; + + if (req->n_ssids > 0) + chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1); + else + chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE; + + for (i = 0; i < cmd->channel_count; i++) { + chan->channel = cpu_to_le16(req->channels[i]->hw_value); + if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) + chan->type = SCAN_CHANNEL_TYPE_PASSIVE; + else + chan->type = chan_type_value; + chan->active_dwell = cpu_to_le16(active_dwell); + chan->passive_dwell = cpu_to_le16(passive_dwell); + chan->iteration_count = cpu_to_le16(1); + chan++; + } +} + +/* + * Fill in probe request with the following parameters: + * TA is our vif HW address, which mac80211 ensures we have. + * Packet is broadcasted, so this is both SA and DA. + * The probe request IE is made out of two: first comes the most prioritized + * SSID if a directed scan is requested. Second comes whatever extra + * information was given to us as the scan request IE. + */ +static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta, + int n_ssids, const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, + int left) +{ + int len = 0; + u8 *pos = NULL; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + eth_broadcast_addr(frame->da); + memcpy(frame->sa, ta, ETH_ALEN); + eth_broadcast_addr(frame->bssid); + frame->seq_ctrl = 0; + + len += 24; + + /* for passive scans, no need to fill anything */ + if (n_ssids == 0) + return (u16)len; + + /* points to the payload of the request */ + pos = &frame->u.probe_req.variable[0]; + + /* fill in our SSID IE */ + left -= ssid_len + 2; + if (left < 0) + return 0; + *pos++ = WLAN_EID_SSID; + *pos++ = ssid_len; + if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */ + memcpy(pos, ssid, ssid_len); + pos += ssid_len; + } + + len += ssid_len + 2; + + if (WARN_ON(left < ie_len)) + return len; + + if (ie && ie_len) { + memcpy(pos, ie, ie_len); + len += ie_len; + } + + return (u16)len; +} + +int iwl_mvm_scan_request(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct iwl_host_cmd hcmd = { + .id = SCAN_REQUEST_CMD, + .len = { 0, }, + .data = { mvm->scan_cmd, }, + .flags = CMD_SYNC, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + struct iwl_scan_cmd *cmd = mvm->scan_cmd; + int ret; + u32 status; + int ssid_len = 0; + u8 *ssid = NULL; + + lockdep_assert_held(&mvm->mutex); + BUG_ON(mvm->scan_cmd == NULL); + + IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n"); + mvm->scan_status = IWL_MVM_SCAN_OS; + memset(cmd, 0, sizeof(struct iwl_scan_cmd) + + mvm->fw->ucode_capa.max_probe_length + + (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel))); + + cmd->channel_count = (u8)req->n_channels; + cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); + cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); + cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm); + cmd->max_out_time = iwl_mvm_scan_max_out_time(vif); + cmd->suspend_time = 
iwl_mvm_scan_suspend_time(vif); + cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req); + cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | + MAC_FILTER_IN_BEACON); + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED); + else + cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); + + cmd->repeats = cpu_to_le32(1); + + /* + * If the user asked for passive scan, don't change to active scan if + * you see any activity on the channel - remain passive. + */ + if (req->n_ssids > 0) { + cmd->passive2active = cpu_to_le16(1); + ssid = req->ssids[0].ssid; + ssid_len = req->ssids[0].ssid_len; + } else { + cmd->passive2active = 0; + } + + iwl_mvm_scan_fill_ssids(cmd, req); + + cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL); + cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; + cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + cmd->tx_cmd.rate_n_flags = + iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band, + req->no_cck); + + cmd->tx_cmd.len = + cpu_to_le16(iwl_mvm_fill_probe_req( + (struct ieee80211_mgmt *)cmd->data, + vif->addr, + req->n_ssids, ssid, ssid_len, + req->ie, req->ie_len, + mvm->fw->ucode_capa.max_probe_length)); + + iwl_mvm_scan_fill_channels(cmd, req); + + cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) + + le16_to_cpu(cmd->tx_cmd.len) + + (cmd->channel_count * sizeof(struct iwl_scan_channel))); + hcmd.len[0] = le16_to_cpu(cmd->len); + + status = SCAN_RESPONSE_OK; + ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status); + if (!ret && status == SCAN_RESPONSE_OK) { + IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); + } else { + /* + * If the scan failed, it usually means that the FW was unable + * to allocate the time events. Warn on it, but maybe we + * should try to send the command again with different params. + */ + IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n", + status, ret); + mvm->scan_status = IWL_MVM_SCAN_NONE; + ret = -EIO; + } + return ret; +} + +int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_cmd_response *resp = (void *)pkt->data; + + IWL_DEBUG_SCAN(mvm, "Scan response received. 
status 0x%x\n", + le32_to_cpu(resp->status)); + return 0; +} + +int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scan_complete_notif *notif = (void *)pkt->data; + + IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n", + notif->status, notif->scanned_channels); + + mvm->scan_status = IWL_MVM_SCAN_NONE; + ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK); + + return 0; +} + +static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_scan_complete_notif *notif; + u32 *resp; + + switch (pkt->hdr.cmd) { + case SCAN_ABORT_CMD: + resp = (void *)pkt->data; + if (*resp == CAN_ABORT_STATUS) { + IWL_DEBUG_SCAN(mvm, + "Scan can be aborted, wait until completion\n"); + return false; + } + + IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n", + *resp); + return true; + + case SCAN_COMPLETE_NOTIFICATION: + notif = (void *)pkt->data; + IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n", + notif->status); + return true; + + default: + WARN_ON(1); + return false; + }; +} + +void iwl_mvm_cancel_scan(struct iwl_mvm *mvm) +{ + struct iwl_notification_wait wait_scan_abort; + static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD, + SCAN_COMPLETE_NOTIFICATION }; + int ret; + + iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, + scan_abort_notif, + ARRAY_SIZE(scan_abort_notif), + iwl_mvm_scan_abort_notif, NULL); + + ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); + if (ret) { + IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); + goto out_remove_notif; + } + + ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ); + if (ret) + IWL_ERR(mvm, "%s - failed on timeout\n", __func__); + + return; + +out_remove_notif: + iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c new file mode 100644 index 000000000000..861a7f9f8e7f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -0,0 +1,1241 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include <net/mac80211.h> + +#include "mvm.h" +#include "sta.h" + +static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm) +{ + int sta_id; + + WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); + + lockdep_assert_held(&mvm->mutex); + + /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ + for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) + if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex))) + return sta_id; + return IWL_MVM_STATION_COUNT; +} + +/* send station add/update command to firmware */ +int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + bool update) +{ + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + struct iwl_mvm_add_sta_cmd add_sta_cmd; + int ret; + u32 status; + u32 agg_size = 0, mpdu_dens = 0; + + memset(&add_sta_cmd, 0, sizeof(add_sta_cmd)); + + add_sta_cmd.sta_id = mvm_sta->sta_id; + add_sta_cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); + if (!update) { + add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); + memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); + } + add_sta_cmd.add_modify = update ? 1 : 0; + + /* STA_FLG_FAT_EN_MSK ? */ + /* STA_FLG_MIMO_EN_MSK ? 
*/ + + if (sta->ht_cap.ht_supported) { + add_sta_cmd.station_flags_msk |= + cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | + STA_FLG_AGG_MPDU_DENS_MSK); + + mpdu_dens = sta->ht_cap.ampdu_density; + } + + if (sta->vht_cap.vht_supported) { + agg_size = sta->vht_cap.cap & + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + agg_size >>= + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + } else if (sta->ht_cap.ht_supported) { + agg_size = sta->ht_cap.ampdu_factor; + } + + add_sta_cmd.station_flags |= + cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); + add_sta_cmd.station_flags |= + cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), + &add_sta_cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "ADD_STA failed\n"); + break; + } + + return ret; +} + +int iwl_mvm_add_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + int i, ret, sta_id; + + lockdep_assert_held(&mvm->mutex); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + sta_id = iwl_mvm_find_free_sta_id(mvm); + else + sta_id = mvm_sta->sta_id; + + if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) + return -ENOSPC; + + spin_lock_init(&mvm_sta->lock); + + mvm_sta->sta_id = sta_id; + mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color); + mvm_sta->vif = vif; + mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + + /* HW restart, don't assume the memory has been zeroed */ + atomic_set(&mvm_sta->pending_frames, 0); + mvm_sta->tid_disable_agg = 0; + mvm_sta->tfd_queue_msk = 0; + for (i = 0; i < IEEE80211_NUM_ACS; i++) + if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) + mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); + + if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) + mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue); + + /* for HW restart - need to reset the seq_number etc... */ + memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data)); + + ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); + if (ret) + return ret; + + /* The first station added is the AP, the others are TDLS STAs */ + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) + mvmvif->ap_sta_id = sta_id; + + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); + + return 0; +} + +int iwl_mvm_update_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return iwl_mvm_sta_send_to_fw(mvm, sta, true); +} + +int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool drain) +{ + struct iwl_mvm_add_sta_cmd cmd = {}; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); + cmd.sta_id = mvmsta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.station_flags = drain ? 
cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; + cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", + mvmsta->sta_id); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", + mvmsta->sta_id); + break; + } + + return ret; +} + +/* + * Remove a station from the FW table. Before sending the command to remove + * the station validate that the station is indeed known to the driver (sanity + * only). + */ +static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { + .sta_id = sta_id, + }; + int ret; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + /* Note: internal stations are marked as error values */ + if (!sta) { + IWL_ERR(mvm, "Invalid station id\n"); + return -EINVAL; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC, + sizeof(rm_sta_cmd), &rm_sta_cmd); + if (ret) { + IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); + return ret; + } + + return 0; +} + +void iwl_mvm_sta_drained_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk); + u8 sta_id; + + /* + * The mutex is needed because of the SYNC cmd, but not only: if the + * work would run concurrently with iwl_mvm_rm_sta, it would run before + * iwl_mvm_rm_sta sets the station as busy, and exit. Then + * iwl_mvm_rm_sta would set the station as busy, and nobody will clean + * that later. + */ + mutex_lock(&mvm->mutex); + + for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) { + int ret; + struct ieee80211_sta *sta = + rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + /* This station is in use */ + if (!IS_ERR(sta)) + continue; + + if (PTR_ERR(sta) == -EINVAL) { + IWL_ERR(mvm, "Drained sta %d, but it is internal?\n", + sta_id); + continue; + } + + if (!sta) { + IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n", + sta_id); + continue; + } + + WARN_ON(PTR_ERR(sta) != -EBUSY); + /* This station was removed and we waited until it got drained, + * we can now proceed and remove it. + */ + ret = iwl_mvm_rm_sta_common(mvm, sta_id); + if (ret) { + IWL_ERR(mvm, + "Couldn't remove sta %d after it was drained\n", + sta_id); + continue; + } + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL); + clear_bit(sta_id, mvm->sta_drained); + } + + mutex_unlock(&mvm->mutex); +} + +int iwl_mvm_rm_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == mvm_sta->sta_id) { + /* + * Put a non-NULL since the fw station isn't removed. + * It will be removed after the MAC will be set as + * unassoc. 
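
The ERR_PTR() placeholders assigned just below (and described in the comment above) are how fw_id_to_mac_id keeps a slot "busy" for a station the firmware still knows about. A minimal reader-side sketch of that convention, not part of this patch (the helper name is hypothetical; it assumes the caller holds the RCU read lock, or mvm->mutex with rcu_dereference_protected() as used elsewhere in this file):

static struct ieee80211_sta *
iwl_mvm_sta_from_fw_id(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;

	if (sta_id >= IWL_MVM_STATION_COUNT)
		return NULL;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* ERR_PTR(-EINVAL) marks internal or intentionally kept stations,
	 * ERR_PTR(-EBUSY) marks a station being drained; neither is a
	 * usable mac80211 station. */
	if (IS_ERR_OR_NULL(sta))
		return NULL;

	return sta;
}
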
+ */ + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], + ERR_PTR(-EINVAL)); + + /* flush its queues here since we are freeing mvm_sta */ + ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true); + + /* if we are associated - we can't remove the AP STA now */ + if (vif->bss_conf.assoc) + return ret; + + /* unassoc - go ahead - remove the AP STA now */ + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + } + + /* + * There are frames pending on the AC queues for this station. + * We need to wait until all the frames are drained... + */ + if (atomic_read(&mvm_sta->pending_frames)) { + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], + ERR_PTR(-EBUSY)); + } else { + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); + } + + return ret; +} + +int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u8 sta_id) +{ + int ret = iwl_mvm_rm_sta_common(mvm, sta_id); + + lockdep_assert_held(&mvm->mutex); + + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL); + return ret; +} + +int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, + u32 qmask) +{ + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + sta->sta_id = iwl_mvm_find_free_sta_id(mvm); + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT)) + return -ENOSPC; + } + + sta->tfd_queue_msk = qmask; + + /* put a non-NULL value so iterating over the stations won't stop */ + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); + return 0; +} + +void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) +{ + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL); + memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); + sta->sta_id = IWL_MVM_STATION_COUNT; +} + +static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, + u16 mac_id, u16 color) +{ + struct iwl_mvm_add_sta_cmd cmd; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd)); + cmd.sta_id = sta->sta_id; + cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, + color)); + + cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); + + if (addr) + memcpy(cmd.addr, addr, ETH_ALEN); + + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "Internal station added.\n"); + return 0; + default: + ret = -EIO; + IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", + status); + break; + } + return ret; +} + +int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* Add the aux station, but without any queues */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0); + if (ret) + return ret; + + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, + MAC_INDEX_AUX, 0); + + if (ret) + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + return ret; +} + +/* + * Send the add station command for the vif's broadcast station. + * Assumes that the station was already allocated. + * + * @mvm: the mvm component + * @vif: the interface to which the broadcast station is added + * @bsta: the broadcast station to add. 
+ */ +int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_int_sta *bsta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) + return -ENOSPC; + + return iwl_mvm_add_int_sta_common(mvm, bsta, baddr, + mvmvif->id, mvmvif->color); +} + +/* Send the FW a request to remove the station from it's internal data + * structures, but DO NOT remove the entry from the local data structures. */ +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *bsta) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + return ret; +} + +/* Allocate a new station entry for the broadcast station to the given vif, + * and send it to the FW. + * Note that each P2P mac should have its own broadcast station. + * + * @mvm: the mvm component + * @vif: the interface to which the broadcast station is added + * @bsta: the broadcast station to add. */ +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_int_sta *bsta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + u32 qmask; + int ret; + + lockdep_assert_held(&mvm->mutex); + + qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); + ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask); + if (ret) + return ret; + + ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, + mvmvif->id, mvmvif->color); + + if (ret) + iwl_mvm_dealloc_int_sta(mvm, bsta); + return ret; +} + +/* + * Send the FW a request to remove the station from it's internal data + * structures, and in addition remove it from the local data structure. + */ +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id); + if (ret) + return ret; + + iwl_mvm_dealloc_int_sta(mvm, bsta); + return ret; +} + +int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u16 ssn, bool start) +{ + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + struct iwl_mvm_add_sta_cmd cmd = {}; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); + cmd.sta_id = mvm_sta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.add_immediate_ba_tid = (u8) tid; + cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); + cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : + STA_MODIFY_REMOVE_BA_TID; + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", + start ? "start" : "stopp"); + break; + case ADD_STA_IMMEDIATE_BA_FAILURE: + IWL_WARN(mvm, "RX BA Session refused by fw\n"); + ret = -ENOSPC; + break; + default: + ret = -EIO; + IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", + start ? 
"start" : "stopp", status); + break; + } + + return ret; +} + +static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u8 queue, bool start) +{ + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + struct iwl_mvm_add_sta_cmd cmd = {}; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + if (start) { + mvm_sta->tfd_queue_msk |= BIT(queue); + mvm_sta->tid_disable_agg &= ~BIT(tid); + } else { + mvm_sta->tfd_queue_msk &= ~BIT(queue); + mvm_sta->tid_disable_agg |= BIT(tid); + } + + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); + cmd.sta_id = mvm_sta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX; + cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + break; + default: + ret = -EIO; + IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", + start ? "start" : "stopp", status); + break; + } + + return ret; +} + +static const u8 tid_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO, +}; + +int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + struct iwl_mvm_tid_data *tid_data; + int txq_id; + + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + return -EINVAL; + + if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { + IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", + mvmsta->tid_data[tid].state); + return -ENXIO; + } + + lockdep_assert_held(&mvm->mutex); + + for (txq_id = IWL_MVM_FIRST_AGG_QUEUE; + txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++) + if (mvm->queue_to_mac80211[txq_id] == + IWL_INVALID_MAC80211_QUEUE) + break; + + if (txq_id > IWL_MVM_LAST_AGG_QUEUE) { + IWL_ERR(mvm, "Failed to allocate agg queue\n"); + return -EIO; + } + + /* the new tx queue is still connected to the same mac80211 queue */ + mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]]; + + spin_lock_bh(&mvmsta->lock); + tid_data = &mvmsta->tid_data[tid]; + tid_data->ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->txq_id = txq_id; + *ssn = tid_data->ssn; + + IWL_DEBUG_TX_QUEUES(mvm, + "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", + mvmsta->sta_id, tid, txq_id, tid_data->ssn, + tid_data->next_reclaimed); + + if (tid_data->ssn == tid_data->next_reclaimed) { + tid_data->state = IWL_AGG_STARTING; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + + spin_unlock_bh(&mvmsta->lock); + + return 0; +} + +int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size) +{ + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + int queue, fifo, ret; + u16 ssn; + + buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); + + spin_lock_bh(&mvmsta->lock); + ssn = tid_data->ssn; + queue = tid_data->txq_id; + tid_data->state = IWL_AGG_ON; + tid_data->ssn = 0xffff; + spin_unlock_bh(&mvmsta->lock); + + fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]]; + + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, 
true); + if (ret) + return -EIO; + + iwl_trans_txq_enable(mvm->trans, queue, fifo, mvmsta->sta_id, tid, + buf_size, ssn); + + /* + * Even though in theory the peer could have different + * aggregation reorder buffer sizes for different sessions, + * our ucode doesn't allow for that and has a global limit + * for each station. Therefore, use the minimum of all the + * aggregation sessions and our default value. + */ + mvmsta->max_agg_bufsize = + min(mvmsta->max_agg_bufsize, buf_size); + mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + + if (mvm->cfg->ht_params->use_rts_for_aggregation) { + /* + * switch to RTS/CTS if it is the prefer protection + * method for HT traffic + */ + mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; + /* + * TODO: remove the TLC_RTS flag when we tear down the last + * AGG session (agg_tids_count in DVM) + */ + } + + IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); + + return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false); +} + +int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + u16 txq_id; + int err; + + spin_lock_bh(&mvmsta->lock); + + txq_id = tid_data->txq_id; + + IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", + mvmsta->sta_id, tid, txq_id, tid_data->state); + + switch (tid_data->state) { + case IWL_AGG_ON: + tid_data->ssn = SEQ_TO_SN(tid_data->seq_number); + + IWL_DEBUG_TX_QUEUES(mvm, + "ssn = %d, next_recl = %d\n", + tid_data->ssn, tid_data->next_reclaimed); + + /* There are still packets for this RA / TID in the HW */ + if (tid_data->ssn != tid_data->next_reclaimed) { + tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA; + err = 0; + break; + } + + tid_data->ssn = 0xffff; + iwl_trans_txq_disable(mvm->trans, txq_id); + /* fall through */ + case IWL_AGG_STARTING: + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * The agg session has been stopped before it was set up. This + * can happen when the AddBA timer times out for example. + */ + + /* No barriers since we are under mutex */ + lockdep_assert_held(&mvm->mutex); + mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE; + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + tid_data->state = IWL_AGG_OFF; + err = 0; + break; + default: + IWL_ERR(mvm, + "Stopping AGG while state not ON or starting for %d on %d (%d)\n", + mvmsta->sta_id, tid, tid_data->state); + IWL_ERR(mvm, + "\ttid_data->txq_id = %d\n", tid_data->txq_id); + err = -EINVAL; + } + + spin_unlock_bh(&mvmsta->lock); + + return err; +} + +static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) +{ + int i; + + lockdep_assert_held(&mvm->mutex); + + i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM); + + if (i == STA_KEY_MAX_NUM) + return STA_KEY_IDX_INVALID; + + __set_bit(i, mvm->fw_key_table); + + return i; +} + +static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; + + if (sta) { + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + + return mvm_sta->sta_id; + } + + /* + * The device expects GTKs for station interfaces to be + * installed as GTKs for the AP station. If we have no + * station ID, then use AP's station ID. 
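
As an aside on the key bookkeeping a few lines up: iwl_mvm_set_fw_key_idx() claims a slot in the mvm->fw_key_table bitmap, and the release is open-coded at the call sites below with __clear_bit()/__test_and_clear_bit(). A sketch of the matching free, for illustration only (no such helper exists in the patch):

static void iwl_mvm_free_fw_key_idx(struct iwl_mvm *mvm, u8 idx)
{
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(idx >= STA_KEY_MAX_NUM))
		return;

	/* mirrors the __set_bit() done in iwl_mvm_set_fw_key_idx() */
	__clear_bit(idx, mvm->fw_key_table);
}
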
+ */ + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) + return mvmvif->ap_sta_id; + + return IWL_INVALID_STATION; +} + +static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct ieee80211_key_conf *keyconf, + u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, + u32 cmd_flags) +{ + __le16 key_flags; + struct iwl_mvm_add_sta_cmd cmd = {}; + int ret, status; + u16 keyidx; + int i; + + keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + STA_KEY_FLG_KEYID_MSK; + key_flags = cpu_to_le16(keyidx); + key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); + cmd.key.tkip_rx_tsc_byte2 = tkip_iv32; + for (i = 0; i < 5; i++) + cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); + memcpy(cmd.key.key, keyconf->key, keyconf->keylen); + break; + case WLAN_CIPHER_SUITE_CCMP: + key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); + memcpy(cmd.key.key, keyconf->key, keyconf->keylen); + break; + default: + WARN_ON(1); + return -EINVAL; + } + + if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); + cmd.key.key_offset = keyconf->hw_key_idx; + cmd.key.key_flags = key_flags; + cmd.add_modify = STA_MODE_MODIFY; + cmd.modify_mask = STA_MODIFY_KEY; + cmd.sta_id = sta_id; + + status = ADD_STA_SUCCESS; + if (cmd_flags == CMD_SYNC) + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + else + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + sizeof(cmd), &cmd); + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); + break; + } + + return ret; +} + +static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, + struct ieee80211_key_conf *keyconf, + u8 sta_id, bool remove_key) +{ + struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; + + /* verify the key details match the required command's expectations */ + if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) || + (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || + (keyconf->keyidx != 4 && keyconf->keyidx != 5))) + return -EINVAL; + + igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); + igtk_cmd.sta_id = cpu_to_le32(sta_id); + + if (remove_key) { + igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); + } else { + struct ieee80211_key_seq seq; + const u8 *pn; + + memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); + ieee80211_aes_cmac_calculate_k1_k2(keyconf, + igtk_cmd.K1, igtk_cmd.K2); + ieee80211_get_key_rx_seq(keyconf, 0, &seq); + pn = seq.aes_cmac.pn; + igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | + ((u64) pn[4] << 8) | + ((u64) pn[3] << 16) | + ((u64) pn[2] << 24) | + ((u64) pn[1] << 32) | + ((u64) pn[0] << 40)); + } + + IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", + remove_key ? 
"removing" : "installing", + igtk_cmd.sta_id); + + return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC, + sizeof(igtk_cmd), &igtk_cmd); +} + + +static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; + + if (sta) + return sta->addr; + + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + u8 sta_id = mvmvif->ap_sta_id; + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + return sta->addr; + } + + + return NULL; +} + +int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf, + bool have_key_offset) +{ + struct iwl_mvm_sta *mvm_sta; + int ret; + u8 *addr, sta_id; + struct ieee80211_key_seq seq; + u16 p1k[5]; + + lockdep_assert_held(&mvm->mutex); + + /* Get the station id from the mvm local station table */ + sta_id = iwl_mvm_get_key_sta_id(vif, sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(mvm, "Failed to find station id\n"); + return -EINVAL; + } + + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); + goto end; + } + + /* + * It is possible that the 'sta' parameter is NULL, and thus + * there is a need to retrieve the sta from the local station table. + */ + if (!sta) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) { + IWL_ERR(mvm, "Invalid station id\n"); + return -EINVAL; + } + } + + mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv; + if (WARN_ON_ONCE(mvm_sta->vif != vif)) + return -EINVAL; + + if (!have_key_offset) { + /* + * The D3 firmware hardcodes the PTK offset to 0, so we have to + * configure it there. As a result, this workaround exists to + * let the caller set the key offset (hw_key_idx), see d3.c. 
+ */ + keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); + if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) + return -ENOSPC; + } + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + addr = iwl_mvm_get_mac_addr(mvm, vif, sta); + /* get phase 1 key from mac80211 */ + ieee80211_get_key_rx_seq(keyconf, 0, &seq); + ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, + seq.tkip.iv32, p1k, CMD_SYNC); + break; + case WLAN_CIPHER_SUITE_CCMP: + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, + 0, NULL, CMD_SYNC); + break; + default: + IWL_ERR(mvm, "Unknown cipher %x\n", keyconf->cipher); + ret = -EINVAL; + } + + if (ret) + __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + +end: + IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta->addr, ret); + return ret; +} + +int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + struct iwl_mvm_sta *mvm_sta; + struct iwl_mvm_add_sta_cmd cmd = {}; + __le16 key_flags; + int ret, status; + u8 sta_id; + + lockdep_assert_held(&mvm->mutex); + + /* Get the station id from the mvm local station table */ + sta_id = iwl_mvm_get_key_sta_id(vif, sta); + + IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", + keyconf->keyidx, sta_id); + + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); + + ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + if (!ret) { + IWL_ERR(mvm, "offset %d not used in fw key table.\n", + keyconf->hw_key_idx); + return -ENOENT; + } + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); + return 0; + } + + /* + * It is possible that the 'sta' parameter is NULL, and thus + * there is a need to retrieve the sta from the local station table, + * for example when a GTK is removed (where the sta_id will then be + * the AP ID, and no station was passed by mac80211.) 
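
For the IGTK path above, the receive_seq_cnt packing in iwl_mvm_send_sta_igtk() places pn[0] in the most significant byte of the 48-bit value handed to the firmware as little-endian. The open-coded shifts are equivalent to the loop below, shown only to make the byte order explicit (the helper is hypothetical and not part of the patch):

static __le64 iwl_mvm_igtk_pn_to_le64(const u8 *pn)
{
	u64 val = 0;
	int i;

	/* pn[0] ends up in the highest of the six used byte positions */
	for (i = 0; i < 6; i++)
		val |= (u64)pn[i] << (8 * (5 - i));

	return cpu_to_le64(val);
}
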
+ */ + if (!sta) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + if (!sta) { + IWL_ERR(mvm, "Invalid station id\n"); + return -EINVAL; + } + } + + mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv; + if (WARN_ON_ONCE(mvm_sta->vif != vif)) + return -EINVAL; + + key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + STA_KEY_FLG_KEYID_MSK); + key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); + key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); + + if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); + cmd.key.key_flags = key_flags; + cmd.key.key_offset = keyconf->hw_key_idx; + cmd.sta_id = sta_id; + + cmd.modify_mask = STA_MODIFY_KEY; + cmd.add_modify = STA_MODE_MODIFY; + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); + break; + } + + return ret; +} + +void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, + u16 *phase1key) +{ + struct iwl_mvm_sta *mvm_sta; + u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); + + if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION)) + return; + + rcu_read_lock(); + + if (!sta) { + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (WARN_ON(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return; + } + } + + mvm_sta = (void *)sta->drv_priv; + iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, + iv32, phase1key, CMD_ASYNC); + rcu_read_unlock(); +} + +void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + struct iwl_mvm_add_sta_cmd cmd = { + .add_modify = STA_MODE_MODIFY, + .sta_id = mvmsta->sta_id, + .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, + .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE), + .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), + }; + int ret; + + /* + * Same modify mask for sleep_tx_count and sleep_state_flags but this + * should be fine since if we set the STA as "awake", then + * sleep_tx_count is not relevant. + */ + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); +} + +void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum ieee80211_frame_release_type reason, + u16 cnt) +{ + u16 sleep_state_flags = + (reason == IEEE80211_FRAME_RELEASE_UAPSD) ? + STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL; + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + struct iwl_mvm_add_sta_cmd cmd = { + .add_modify = STA_MODE_MODIFY, + .sta_id = mvmsta->sta_id, + .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, + .sleep_tx_count = cpu_to_le16(cnt), + .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), + /* + * Same modify mask for sleep_tx_count and sleep_state_flags so + * we must set the sleep_state_flags too. 
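
The sleep_tx_count / sleep_state_flags coupling described above is exercised from mac80211's frame-release handling. A hedged usage sketch, assuming a U-APSD service period releasing a handful of frames (the wrapper below is illustrative only, not the driver's actual mac80211 callback):

static void example_release_buffered_frames(struct iwl_mvm *mvm,
					    struct ieee80211_sta *sta,
					    u16 n_frames)
{
	/* Let the fw send n_frames to this (asleep) station during the
	 * U-APSD service period. */
	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					  IEEE80211_FRAME_RELEASE_UAPSD,
					  n_frames);
}
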
+ */ + .sleep_state_flags = cpu_to_le16(sleep_state_flags), + }; + int ret; + + /* TODO: somehow the fw doesn't seem to take PS_POLL into account */ + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h new file mode 100644 index 000000000000..896f88ac8145 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -0,0 +1,374 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __sta_h__ +#define __sta_h__ + +#include <linux/spinlock.h> +#include <net/mac80211.h> +#include <linux/wait.h> + +#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ +#include "fw-api.h" /* IWL_MVM_STATION_COUNT */ +#include "rs.h" + +struct iwl_mvm; + +/** + * DOC: station table - introduction + * + * The station table is a list of data structure that reprensent the stations. + * In STA/P2P client mode, the driver will hold one station for the AP/ GO. + * In GO/AP mode, the driver will have as many stations as associated clients. + * All these stations are reflected in the fw's station table. The driver + * keeps the fw's station table up to date with the ADD_STA command. Stations + * can be removed by the REMOVE_STA command. + * + * All the data related to a station is held in the structure %iwl_mvm_sta + * which is embed in the mac80211's %ieee80211_sta (in the drv_priv) area. + * This data includes the index of the station in the fw, per tid information + * (sequence numbers, Block-ack state machine, etc...). The stations are + * created and deleted by the %sta_state callback from %ieee80211_ops. + * + * The driver holds a map: %fw_id_to_mac_id that allows to fetch a + * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw + * station index. That way, the driver is able to get the tid related data in + * O(1) in time sensitive paths (Tx / Tx response / BA notification). These + * paths are triggered by the fw, and the driver needs to get a pointer to the + * %ieee80211 structure. This map helps to get that pointer quickly. + */ + +/** + * DOC: station table - locking + * + * As stated before, the station is created / deleted by mac80211's %sta_state + * callback from %ieee80211_ops which can sleep. The next paragraph explains + * the locking of a single stations, the next ones relates to the station + * table. + * + * The station holds the sequence number per tid. So this data needs to be + * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack + * information (the state machine / and the logic that checks if the queues + * were drained), so it also needs to be accessible from the Tx response flow. + * In short, the station needs to be access from sleepable context as well as + * from tasklets, so the station itself needs a spinlock. + * + * The writers of %fw_id_to_mac_id map are serialized by the global mutex of + * the mvm op_mode. This is possible since %sta_state can sleep. + * The pointers in this map are RCU protected, hence we won't replace the + * station while we have Tx / Tx response / BA notification running. + * + * If a station is deleted while it still has packets in its A-MPDU queues, + * then the reclaim flow will notice that there is no station in the map for + * sta_id and it will dump the responses. + */ + +/** + * DOC: station table - internal stations + * + * The FW needs a few internal stations that are not reflected in + * mac80211, such as broadcast station in AP / GO mode, or AUX sta for + * scanning and P2P device (during the GO negotiation). + * For these kind of stations we have %iwl_mvm_int_sta struct which holds the + * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta. + * Usually the data for these stations is static, so no locking is required, + * and no TID data as this is also not needed. + * One thing to note, is that these stations have an ID in the fw, but not + * in mac80211. 
In order to "reserve" them a sta_id in %fw_id_to_mac_id + * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of + * pointers from this mapping need to check that the value is not error + * or NULL. + * + * Currently there is only one auxiliary station for scanning, initialized + * on init. + */ + +/** + * DOC: station table - AP Station in STA mode + * + * %iwl_mvm_vif includes the index of the AP station in the fw's STA table: + * %ap_sta_id. To get the point to the coresponsding %ieee80211_sta, + * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove + * the AP station from the fw before setting the MAC context as unassociated. + * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is + * removed by mac80211, but the station won't be removed in the fw until the + * VIF is set as unassociated. Then, %ap_sta_id will be invalidated. + */ + +/** + * DOC: station table - Drain vs. Flush + * + * Flush means that all the frames in the SCD queue are dumped regardless the + * station to which they were sent. We do that when we disassociate and before + * we remove the STA of the AP. The flush can be done synchronously against the + * fw. + * Drain means that the fw will drop all the frames sent to a specific station. + * This is useful when a client (if we are IBSS / GO or AP) disassociates. In + * that case, we need to drain all the frames for that client from the AC queues + * that are shared with the other clients. Only then, we can remove the STA in + * the fw. In order to do so, we track the non-AMPDU packets for each station. + * If mac80211 removes a STA and if it still has non-AMPDU packets pending in + * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all + * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped + * (we know about it with its Tx response), we remove the station in fw and set + * it as %NULL in %fw_id_to_mac_id: this is the purpose of + * %iwl_mvm_sta_drained_wk. + */ + +/** + * DOC: station table - fw restart + * + * When the fw asserts, or we have any other issue that requires to reset the + * driver, we require mac80211 to reconfigure the driver. Since the private + * data of the stations is embed in mac80211's %ieee80211_sta, that data will + * not be zeroed and needs to be reinitialized manually. + * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and that will hint us + * that we must not allocate a new sta_id but reuse the previous one. This + * means that the stations being re-added after the reset will have the same + * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id + * map, since the stations aren't in the fw any more. Internal stations that + * are not added by mac80211 will be re-added in the init flow that is called + * after the restart: mac80211 call's %iwl_mvm_mac_start which calls to + * %iwl_mvm_up. + */ + +/** + * DOC: AP mode - PS + * + * When a station is asleep, the fw will set it as "asleep". All the + * non-aggregation frames to that station will be dropped by the fw + * (%TX_STATUS_FAIL_DEST_PS failure code). + * AMPDUs are in a separate queue that is stopped by the fw. We just need to + * let mac80211 know how many frames we have in these queues so that it can + * properly handle trigger frames. 
+ * When the a trigger frame is received, mac80211 tells the driver to send + * frames from the AMPDU queues or AC queue depending on which queue are + * delivery-enabled and what TID has frames to transmit (Note that mac80211 has + * all the knowledege since all the non-agg frames are buffered / filtered, and + * the driver tells mac80211 about agg frames). The driver needs to tell the fw + * to let frames out even if the station is asleep. This is done by + * %iwl_mvm_sta_modify_sleep_tx_count. + * When we receive a frame from that station with PM bit unset, the + * driver needs to let the fw know that this station isn't alseep any more. + * This is done by %iwl_mvm_sta_modify_ps_wake. + * + * TODO - EOSP handling + */ + +/** + * enum iwl_mvm_agg_state + * + * The state machine of the BA agreement establishment / tear down. + * These states relate to a specific RA / TID. + * + * @IWL_AGG_OFF: aggregation is not used + * @IWL_AGG_STARTING: aggregation are starting (between start and oper) + * @IWL_AGG_ON: aggregation session is up + * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + */ +enum iwl_mvm_agg_state { + IWL_AGG_OFF = 0, + IWL_AGG_STARTING, + IWL_AGG_ON, + IWL_EMPTYING_HW_QUEUE_ADDBA, + IWL_EMPTYING_HW_QUEUE_DELBA, +}; + +/** + * struct iwl_mvm_tid_data - holds the states for each RA / TID + * @seq_number: the next WiFi sequence number to use + * @next_reclaimed: the WiFi sequence number of the next packet to be acked. + * This is basically (last acked packet++). + * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the + * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). + * @state: state of the BA agreement establishment / tear down. + * @txq_id: Tx queue used by the BA session + * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or + * the first packet to be sent in legacy HW queue in Tx AGG stop flow. + * Basically when next_reclaimed reaches ssn, we can tell mac80211 that + * we are ready to finish the Tx AGG stop / start flow. + * @wait_for_ba: Expect block-ack before next Tx reply + */ +struct iwl_mvm_tid_data { + u16 seq_number; + u16 next_reclaimed; + /* The rest is Tx AGG related */ + u32 rate_n_flags; + enum iwl_mvm_agg_state state; + u16 txq_id; + u16 ssn; + bool wait_for_ba; +}; + +/** + * struct iwl_mvm_sta - representation of a station in the driver + * @sta_id: the index of the station in the fw (will be replaced by id_n_color) + * @tfd_queue_msk: the tfd queues used by the station + * @mac_id_n_color: the MAC context this station is linked to + * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for + * tid. + * @max_agg_bufsize: the maximal size of the AGG buffer for this station + * @lock: lock to protect the whole struct. Since %tid_data is access from Tx + * and from Tx response flow, it needs a spinlock. + * @pending_frames: number of frames for this STA on the shared Tx queues. + * @tid_data: per tid data. Look at %iwl_mvm_tid_data. + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by driver. This structure is placed in that + * space. 
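
The "placed in that space" remark above is the usual mac80211 drv_priv embedding: the driver asks mac80211 to reserve sizeof(struct iwl_mvm_sta) per station via hw->sta_data_size, and every (void *)sta->drv_priv cast in sta.c is this mapping. A minimal sketch, with a hypothetical helper name not present in the patch:

static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
{
	/* mac80211 reserved hw->sta_data_size bytes right after *sta */
	return (struct iwl_mvm_sta *)sta->drv_priv;
}
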
+ * + */ +struct iwl_mvm_sta { + u32 sta_id; + u32 tfd_queue_msk; + u32 mac_id_n_color; + u16 tid_disable_agg; + u8 max_agg_bufsize; + spinlock_t lock; + atomic_t pending_frames; + struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; + struct iwl_lq_sta lq_sta; + struct ieee80211_vif *vif; + +#ifdef CONFIG_PM_SLEEP + u16 last_seq_ctl; +#endif +}; + +/** + * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or + * broadcast) + * @sta_id: the index of the station in the fw (will be replaced by id_n_color) + * @tfd_queue_msk: the tfd queues used by the station + */ +struct iwl_mvm_int_sta { + u32 sta_id; + u32 tfd_queue_msk; +}; + +int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + bool update); +int iwl_mvm_add_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_update_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_rm_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u8 sta_id); +int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + bool have_key_offset); +int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); + +void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, + u16 *phase1key); + +/* AMPDU */ +int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u16 ssn, bool start); +int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size); +int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); + +int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); +int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, + u32 qmask); +void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta); +int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_int_sta *bsta); +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *bsta); +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_int_sta *bsta); +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta); +void iwl_mvm_sta_drained_wk(struct work_struct *wk); +void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); +void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum ieee80211_frame_release_type reason, + u16 cnt); +int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool drain); + +#endif /* __sta_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c new file mode 100644 index 000000000000..e437e02c7149 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -0,0 +1,519 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/jiffies.h> +#include <net/mac80211.h> + +#include "iwl-notif-wait.h" +#include "iwl-trans.h" +#include "fw-api.h" +#include "time-event.h" +#include "mvm.h" +#include "iwl-io.h" +#include "iwl-prph.h" + +/* A TimeUnit is 1024 microsecond */ +#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024)) +#define MSEC_TO_TU(_msec) (_msec*1000/1024) + +/* For ROC use a TE type which has priority high enough to be scheduled when + * there is a concurrent BSS or GO/AP. Currently, use a TE type that has + * priority similar to the TE priority used for action scans by the FW. 
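Editorial note on the conversion macros defined a few lines up: a TU is 1024 microseconds, so for example MSEC_TO_TU(500) = 500 * 1000 / 1024 = 488 TU, and going the other way TU_TO_JIFFIES(488) = usecs_to_jiffies(488 * 1024), i.e. roughly the same 500 ms expressed in jiffies. The numbers are only an illustration.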
+ * TODO: This needs to be changed, based on the reason for the ROC, i.e., use + * TE_P2P_DEVICE_DISCOVERABLE for remain on channel without mgmt skb, and use + * TE_P2P_DEVICE_ACTION_SCAN + */ +#define IWL_MVM_ROC_TE_TYPE TE_P2P_DEVICE_ACTION_SCAN + +void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data) +{ + lockdep_assert_held(&mvm->time_event_lock); + + if (te_data->id == TE_MAX) + return; + + list_del(&te_data->list); + te_data->running = false; + te_data->uid = 0; + te_data->id = TE_MAX; + te_data->vif = NULL; +} + +void iwl_mvm_roc_done_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); + + synchronize_net(); + + /* + * Flush the offchannel queue -- this is called when the time + * event finishes or is cancelled, so that frames queued for it + * won't get stuck on the queue and be transmitted in the next + * time event. + * We have to send the command asynchronously since this cannot + * be under the mutex for locking reasons, but that's not an + * issue as it will have to complete before the next command is + * executed, and a new time event means a new command. + */ + iwl_mvm_flush_tx_path(mvm, BIT(IWL_OFFCHANNEL_QUEUE), false); +} + +static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) +{ + /* + * First, clear the ROC_RUNNING status bit. This will cause the TX + * path to drop offchannel transmissions. That would also be done + * by mac80211, but it is racy, in particular in the case that the + * time event actually completed in the firmware (which is handled + * in iwl_mvm_te_handle_notif). + */ + clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + + /* + * Of course, our status bit is just as racy as mac80211, so in + * addition, fire off the work struct which will drop all frames + * from the hardware queues that made it through the race. First + * it will of course synchronize the TX path to make sure that + * any *new* TX will be rejected. + */ + schedule_work(&mvm->roc_done_wk); +} + +/* + * Handles a FW notification for an event that is known to the driver. + * + * @mvm: the mvm component + * @te_data: the time event data + * @notif: the notification data corresponding the time event data. + */ +static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data, + struct iwl_time_event_notif *notif) +{ + lockdep_assert_held(&mvm->time_event_lock); + + IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n", + le32_to_cpu(notif->unique_id), + le32_to_cpu(notif->action)); + + /* + * The FW sends the start/end time event notifications even for events + * that it fails to schedule. This is indicated in the status field of + * the notification. This happens in cases that the scheduler cannot + * find a schedule that can handle the event (for example requesting a + * P2P Device discoveribility, while there are other higher priority + * events in the system). + */ + WARN_ONCE(!le32_to_cpu(notif->status), + "Failed to schedule time event\n"); + + if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_END) { + IWL_DEBUG_TE(mvm, + "TE ended - current time %lu, estimated end %lu\n", + jiffies, te_data->end_jiffies); + + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { + ieee80211_remain_on_channel_expired(mvm->hw); + iwl_mvm_roc_finished(mvm); + } + + /* + * By now, we should have finished association + * and know the dtim period. 
+ */ + if (te_data->vif->type == NL80211_IFTYPE_STATION && + (!te_data->vif->bss_conf.assoc || + !te_data->vif->bss_conf.dtim_period)) { + IWL_ERR(mvm, + "No assocation and the time event is over already...\n"); + ieee80211_connection_loss(te_data->vif); + } + + iwl_mvm_te_clear_data(mvm, te_data); + } else if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_START) { + te_data->running = true; + te_data->end_jiffies = jiffies + + TU_TO_JIFFIES(te_data->duration); + + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { + set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + ieee80211_ready_on_channel(mvm->hw); + } + } else { + IWL_WARN(mvm, "Got TE with unknown action\n"); + } +} + +/* + * The Rx handler for time event notifications + */ +int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_time_event_notif *notif = (void *)pkt->data; + struct iwl_mvm_time_event_data *te_data, *tmp; + + IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n", + le32_to_cpu(notif->unique_id), + le32_to_cpu(notif->action)); + + spin_lock_bh(&mvm->time_event_lock); + list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) { + if (le32_to_cpu(notif->unique_id) == te_data->uid) + iwl_mvm_te_handle_notif(mvm, te_data, notif); + } + spin_unlock_bh(&mvm->time_event_lock); + + return 0; +} + +static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_time_event_data *te_data = data; + struct iwl_time_event_resp *resp; + int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + + if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) + return true; + + if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) { + IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n"); + return true; + } + + resp = (void *)pkt->data; + + /* we should never get a response to another TIME_EVENT_CMD here */ + if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) + return false; + + te_data->uid = le32_to_cpu(resp->unique_id); + IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", + te_data->uid); + return true; +} + +static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_time_event_data *te_data, + struct iwl_time_event_cmd *te_cmd) +{ + static const u8 time_event_response[] = { TIME_EVENT_CMD }; + struct iwl_notification_wait wait_time_event; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", + le32_to_cpu(te_cmd->duration)); + + spin_lock_bh(&mvm->time_event_lock); + if (WARN_ON(te_data->id != TE_MAX)) { + spin_unlock_bh(&mvm->time_event_lock); + return -EIO; + } + te_data->vif = vif; + te_data->duration = le32_to_cpu(te_cmd->duration); + te_data->id = le32_to_cpu(te_cmd->id); + list_add_tail(&te_data->list, &mvm->time_event_list); + spin_unlock_bh(&mvm->time_event_lock); + + /* + * Use a notification wait, which really just processes the + * command response and doesn't wait for anything, in order + * to be able to process the response and get the UID inside + * the RX path. Using CMD_WANT_SKB doesn't work because it + * stores the buffer and then wakes up this thread, by which + * time another notification (that the time event started) + * might already be processed unsuccessfully. 
+ */ + iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, + time_event_response, + ARRAY_SIZE(time_event_response), + iwl_mvm_time_event_response, te_data); + + ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, + sizeof(*te_cmd), te_cmd); + if (ret) { + IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); + iwl_remove_notification(&mvm->notif_wait, &wait_time_event); + goto out_clear_te; + } + + /* No need to wait for anything, so just pass 1 (0 isn't valid) */ + ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); + /* should never fail */ + WARN_ON_ONCE(ret); + + if (ret) { + out_clear_te: + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); + } + return ret; +} + +void iwl_mvm_protect_session(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 duration, u32 min_duration) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + struct iwl_time_event_cmd time_cmd = {}; + + lockdep_assert_held(&mvm->mutex); + + if (te_data->running && + time_after(te_data->end_jiffies, + jiffies + TU_TO_JIFFIES(min_duration))) { + IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", + jiffies_to_msecs(te_data->end_jiffies - jiffies)); + return; + } + + if (te_data->running) { + IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", + te_data->uid, + jiffies_to_msecs(te_data->end_jiffies - jiffies)); + /* + * we don't have enough time + * cancel the current TE and issue a new one + * Of course it would be better to remove the old one only + * when the new one is added, but we don't care if we are off + * channel for a bit. All we need to do, is not to return + * before we actually begin to be on the channel. + */ + iwl_mvm_stop_session_protection(mvm, vif); + } + + time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); + time_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); + + time_cmd.apply_time = + cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG)); + + time_cmd.dep_policy = TE_INDEPENDENT; + time_cmd.is_present = cpu_to_le32(1); + time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE); + time_cmd.max_delay = cpu_to_le32(500); + /* TODO: why do we need to interval = bi if it is not periodic? */ + time_cmd.interval = cpu_to_le32(1); + time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1)); + time_cmd.duration = cpu_to_le32(duration); + time_cmd.repeat = cpu_to_le32(1); + time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END); + + iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); +} + +/* + * Explicit request to remove a time event. The removal of a time event needs to + * be synchronized with the flow of a time event's end notification, which also + * removes the time event from the op mode data structures. + */ +void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_time_event_data *te_data) +{ + struct iwl_time_event_cmd time_cmd = {}; + u32 id, uid; + int ret; + + /* + * It is possible that by the time we got to this point the time + * event was already removed. 
+ */ + spin_lock_bh(&mvm->time_event_lock); + + /* Save time event uid before clearing its data */ + uid = te_data->uid; + id = te_data->id; + + /* + * The clear_data function handles time events that were already removed + */ + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); + + /* + * It is possible that by the time we try to remove it, the time event + * has already ended and removed. In such a case there is no need to + * send a removal command. + */ + if (id == TE_MAX) { + IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid); + return; + } + + /* When we remove a TE, the UID is to be set in the id field */ + time_cmd.id = cpu_to_le32(uid); + time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); + time_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + + IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); + ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, + sizeof(time_cmd), &time_cmd); + if (WARN_ON(ret)) + return; +} + +void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + + lockdep_assert_held(&mvm->mutex); + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); +} + +int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + int duration) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + struct iwl_time_event_cmd time_cmd = {}; + + lockdep_assert_held(&mvm->mutex); + if (te_data->running) { + IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n"); + return -EBUSY; + } + + /* + * Flush the done work, just in case it's still pending, so that + * the work it does can complete and we can accept new frames. + */ + flush_work(&mvm->roc_done_wk); + + time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); + time_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE); + + time_cmd.apply_time = cpu_to_le32(0); + time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT); + time_cmd.is_present = cpu_to_le32(1); + + time_cmd.interval = cpu_to_le32(1); + + /* + * IWL_MVM_ROC_TE_TYPE can have lower priority than other events + * that are being scheduled by the driver/fw, and thus it might not be + * scheduled. To improve the chances of it being scheduled, allow it to + * be fragmented. + * In addition, for the same reasons, allow to delay the scheduling of + * the time event. + */ + time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20); + time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); + time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); + time_cmd.repeat = cpu_to_le32(1); + time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END); + + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); +} + +void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm) +{ + struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_time_event_data *te_data; + + lockdep_assert_held(&mvm->mutex); + + /* + * Iterate over the list of time events and find the time event that is + * associated with a P2P_DEVICE interface. 
+ * This assumes that a P2P_DEVICE interface can have only a single time + * event at any given time and this time event coresponds to a ROC + * request + */ + mvmvif = NULL; + spin_lock_bh(&mvm->time_event_lock); + list_for_each_entry(te_data, &mvm->time_event_list, list) { + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + break; + } + } + spin_unlock_bh(&mvm->time_event_lock); + + if (!mvmvif) { + IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n"); + return; + } + + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); + + iwl_mvm_roc_finished(mvm); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h new file mode 100644 index 000000000000..64fb57a5ab43 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h @@ -0,0 +1,214 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __time_event_h__ +#define __time_event_h__ + +#include "fw-api.h" + +#include "mvm.h" + +/** + * DOC: Time Events - what is it? + * + * Time Events are a fw feature that allows the driver to control the presence + * of the device on the channel. Since the fw supports multiple channels + * concurrently, the fw may choose to jump to another channel at any time. + * In order to make sure that the fw is on a specific channel at a certain time + * and for a certain duration, the driver needs to issue a time event. + * + * The simplest example is for BSS association. The driver issues a time event, + * waits for it to start, and only then tells mac80211 that we can start the + * association. This way, we make sure that the association will be done + * smoothly and won't be interrupted by channel switch decided within the fw. + */ + + /** + * DOC: The flow against the fw + * + * When the driver needs to make sure we are in a certain channel, at a certain + * time and for a certain duration, it sends a Time Event. The flow against the + * fw goes like this: + * 1) Driver sends a TIME_EVENT_CMD to the fw + * 2) Driver gets the response for that command. This response contains the + * Unique ID (UID) of the event. + * 3) The fw sends notification when the event starts. + * + * Of course the API provides various options that allow to cover parameters + * of the flow. + * What is the duration of the event? + * What is the start time of the event? + * Is there an end-time for the event? + * How much can the event be delayed? + * Can the event be split? + * If yes what is the maximal number of chunks? + * etc... + */ + +/** + * DOC: Abstraction to the driver + * + * In order to simplify the use of time events to the rest of the driver, + * we abstract the use of time events. This component provides the functions + * needed by the driver. + */ + +#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500 +#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400 + +/** + * iwl_mvm_protect_session - start / extend the session protection. + * @mvm: the mvm component + * @vif: the virtual interface for which the session is issued + * @duration: the duration of the session in TU. + * @min_duration: will start a new session if the current session will end + * in less than min_duration. + * + * This function can be used to start a session protection which means that the + * fw will stay on the channel for %duration_ms milliseconds. This function + * will block (sleep) until the session starts. This function can also be used + * to extend a currently running session. + * This function is meant to be used for BSS association for example, where we + * want to make sure that the fw stays on the channel during the association. 
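Editorial sketch, not part of the patch, of how a caller might use the session protection described in this comment. It assumes, per the lockdep assertion in time-event.c, that mvm->mutex is held around the call; the wrapper function and the TU values are made up:

/* hypothetical caller; durations are in TU (1 TU = 1024 usec) */
static void example_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	mutex_lock(&mvm->mutex);
	/* stay on the channel for ~500 TU, extend if less than 400 TU remain */
	iwl_mvm_protect_session(mvm, vif, 500, 400);
	mutex_unlock(&mvm->mutex);
}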
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     u32 duration, u32 min_duration);
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship. If it is not needed any more it should be cancelled, because
+ * the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif);
+
+/*
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ */
+int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb,
+				struct iwl_device_cmd *cmd);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is requested. It is assumed
+ * that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in milliseconds for the fw to be on the
+ * channel that is bound to the vif.
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay on the channel for the requested %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			  int duration);
+
+/**
+ * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async; it will instruct the FW to stop serving the ROC
+ * session, but will not wait for the actual stopping of the session.
+ */
+void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm);
+
+/**
+ * iwl_mvm_remove_time_event - general function to clean up a time event
+ * @mvm: the mvm component
+ * @mvmvif: the vif to which the time event belongs
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function can be used to cancel a time event regardless of its type.
+ * It is useful for cleaning up time events that are still running before
+ * removing an interface.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+			       struct iwl_mvm_vif *mvmvif,
+			       struct iwl_mvm_time_event_data *te_data);
+
+/**
+ * iwl_mvm_te_clear_data - remove time event from list
+ * @mvm: the mvm component
+ * @te_data: the time event data to remove
+ *
+ * This function is mostly internal; it is made available here only
+ * for firmware restart purposes.
+ */
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+			   struct iwl_mvm_time_event_data *te_data);
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk);
+
+#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
new file mode 100644
index 000000000000..6b67ce3f679c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -0,0 +1,916 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include <linux/ieee80211.h> +#include <linux/etherdevice.h> + +#include "iwl-trans.h" +#include "iwl-eeprom-parse.h" +#include "mvm.h" +#include "sta.h" + +/* + * Sets most of the Tx cmd's fields + */ +static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, u8 sta_id) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); + u32 len = skb->len + FCS_LEN; + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + tx_flags |= TX_CMD_FLG_ACK; + else + tx_flags &= ~TX_CMD_FLG_ACK; + + if (ieee80211_is_probe_resp(fc)) + tx_flags |= TX_CMD_FLG_TSF; + else if (ieee80211_is_back_req(fc)) + tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; + + /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */ + if (info->band == IEEE80211_BAND_2GHZ && + (skb->protocol == cpu_to_be16(ETH_P_PAE) || + is_multicast_ether_addr(hdr->addr1) || + ieee80211_is_back_req(fc) || + ieee80211_is_mgmt(fc))) + tx_flags |= TX_CMD_FLG_BT_DIS; + + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + } else { + tx_cmd->tid_tspec = IWL_TID_NON_QOS; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + tx_flags |= TX_CMD_FLG_SEQ_CTL; + else + tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + } + + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->pm_frame_timeout = cpu_to_le16(2); + + /* The spec allows Action frames in A-MPDU, we don't support + * it + */ + WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); + } else { + tx_cmd->pm_frame_timeout = 0; + } + + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_flags |= TX_CMD_FLG_PROT_REQUIRE; + + if (ieee80211_is_data(fc) && len > mvm->rts_threshold && + !is_multicast_ether_addr(ieee80211_get_DA(hdr))) + tx_flags |= TX_CMD_FLG_PROT_REQUIRE; + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = cpu_to_le32(tx_flags); + /* Total # bytes to be transmitted */ + tx_cmd->len = cpu_to_le16((u16)skb->len); + tx_cmd->next_frame_len = 0; + tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + tx_cmd->sta_id = sta_id; +} + +/* + * Sets the fields in the Tx cmd that are rate related + */ +static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + __le16 fc) +{ + u32 rate_flags; + int rate_idx; + u8 rate_plcp; + + /* Set retry limit on RTS packets */ + tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) { + tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; + tx_cmd->rts_retry_limit = + min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); + } else if (ieee80211_is_back_req(fc)) { + tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; + } else { + tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; + } + + /* + * for data packets, rate info comes from the table inside he fw. 
This + * table is controlled by LINK_QUALITY commands + */ + + if (ieee80211_is_data(fc)) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + return; + } else if (ieee80211_is_back_req(fc)) { + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + } + + /* HT rate doesn't make sense for a non data frame */ + WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, + "Got an HT rate for a non data frame 0x%x\n", + info->control.rates[0].flags); + + rate_idx = info->control.rates[0].idx; + /* if the rate isn't a well known legacy rate, take the lowest one */ + if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY) + rate_idx = rate_lowest_index( + &mvm->nvm_data->bands[info->band], sta); + + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + + /* For 2.4 GHZ band, check that there is no need to remap */ + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); + + mvm->mgmt_last_antenna_idx = + iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant, + mvm->mgmt_last_antenna_idx); + rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags); +} + +/* + * Sets the fields in the Tx cmd that are crypto related + */ +static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); + break; + + case WLAN_CIPHER_SUITE_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | + ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & + TX_CMD_SEC_WEP_KEY_IDX_MSK); + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + break; + default: + IWL_ERR(mvm, "Unknown encode cipher %x\n", keyconf->cipher); + break; + } +} + +/* + * Allocates and sets the Tx cmd the driver data pointers in the skb + */ +static struct iwl_device_cmd * +iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta, u8 sta_id) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_device_cmd *dev_cmd; + struct iwl_tx_cmd *tx_cmd; + + dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); + + if (unlikely(!dev_cmd)) + return NULL; + + memset(dev_cmd, 0, sizeof(*dev_cmd)); + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + + if (info->control.hw_key) + iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb); + + iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); + + iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); + + memset(&info->status, 0, sizeof(info->status)); + + info->driver_data[0] = NULL; + info->driver_data[1] = 
dev_cmd; + + return dev_cmd; +} + +int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_device_cmd *dev_cmd; + struct iwl_tx_cmd *tx_cmd; + u8 sta_id; + + if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) + return -1; + + if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && + (!info->control.vif || + info->hw_queue != info->control.vif->cab_queue))) + return -1; + + /* + * If the interface on which frame is sent is the P2P_DEVICE + * or an AP/GO interface use the broadcast station associated + * with it; otherwise use the AUX station. + */ + if (info->control.vif && + (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || + info->control.vif->type == NL80211_IFTYPE_AP)) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(info->control.vif); + sta_id = mvmvif->bcast_sta.sta_id; + } else { + sta_id = mvm->aux_sta.sta_id; + } + + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); + + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id); + if (!dev_cmd) + return -1; + + /* From now on, we cannot access info->control */ + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control)); + + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + return -1; + } + + return 0; +} + +/* + * Sets the fields in the Tx cmd that are crypto related + */ +int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_mvm_sta *mvmsta; + struct iwl_device_cmd *dev_cmd; + struct iwl_tx_cmd *tx_cmd; + __le16 fc; + u16 seq_number = 0; + u8 tid = IWL_MAX_TID_COUNT; + u8 txq_id = info->hw_queue; + bool is_data_qos = false, is_ampdu = false; + + mvmsta = (void *)sta->drv_priv; + fc = hdr->frame_control; + + if (WARN_ON_ONCE(!mvmsta)) + return -1; + + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_INVALID_STATION)) + return -1; + + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id); + if (!dev_cmd) + goto drop; + + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + /* From now on, we cannot access info->control */ + + spin_lock(&mvmsta->lock); + + if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { + u8 *qc = NULL; + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + goto drop_unlock_sta; + + seq_number = mvmsta->tid_data[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + is_data_qos = true; + is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; + } + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc)); + + WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); + + if (is_ampdu) { + if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) + goto drop_unlock_sta; + txq_id = mvmsta->tid_data[tid].txq_id; + } + + IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, + tid, txq_id, seq_number); + + /* NOTE: aggregation will need changes here (for txq id) */ + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) + 
goto drop_unlock_sta; + + if (is_data_qos && !ieee80211_has_morefrags(fc)) + mvmsta->tid_data[tid].seq_number = seq_number; + + spin_unlock(&mvmsta->lock); + + if (mvmsta->vif->type == NL80211_IFTYPE_AP && + txq_id < IWL_FIRST_AMPDU_QUEUE) + atomic_inc(&mvmsta->pending_frames); + + return 0; + +drop_unlock_sta: + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + spin_unlock(&mvmsta->lock); +drop: + return -1; +} + +static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, u8 tid) +{ + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + struct ieee80211_vif *vif = mvmsta->vif; + + lockdep_assert_held(&mvmsta->lock); + + if (tid_data->ssn != tid_data->next_reclaimed) + return; + + switch (tid_data->state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + IWL_DEBUG_TX_QUEUES(mvm, + "Can continue addBA flow ssn = next_recl = %d\n", + tid_data->next_reclaimed); + tid_data->state = IWL_AGG_STARTING; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + + case IWL_EMPTYING_HW_QUEUE_DELBA: + IWL_DEBUG_TX_QUEUES(mvm, + "Can continue DELBA flow ssn = next_recl = %d\n", + tid_data->next_reclaimed); + iwl_trans_txq_disable(mvm->trans, tid_data->txq_id); + tid_data->state = IWL_AGG_OFF; + /* + * we can't hold the mutex - but since we are after a sequence + * point (call to iwl_trans_txq_disable), so we don't even need + * a memory barrier. + */ + mvm->queue_to_mac80211[tid_data->txq_id] = + IWL_INVALID_MAC80211_QUEUE; + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + + default: + break; + } +} + +#ifdef CONFIG_IWLWIFI_DEBUG +const char *iwl_mvm_get_tx_fail_reason(u32 status) +{ +#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x +#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x + + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_POSTPONE(DELAY); + TX_STATUS_POSTPONE(FEW_BYTES); + TX_STATUS_POSTPONE(BT_PRIO); + TX_STATUS_POSTPONE(QUIET_PERIOD); + TX_STATUS_POSTPONE(CALC_TTAK); + TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); + TX_STATUS_FAIL(SHORT_LIMIT); + TX_STATUS_FAIL(LONG_LIMIT); + TX_STATUS_FAIL(UNDERRUN); + TX_STATUS_FAIL(DRAIN_FLOW); + TX_STATUS_FAIL(RFKILL_FLUSH); + TX_STATUS_FAIL(LIFE_EXPIRE); + TX_STATUS_FAIL(DEST_PS); + TX_STATUS_FAIL(HOST_ABORTED); + TX_STATUS_FAIL(BT_RETRY); + TX_STATUS_FAIL(STA_INVALID); + TX_STATUS_FAIL(FRAG_DROPPED); + TX_STATUS_FAIL(TID_DISABLE); + TX_STATUS_FAIL(FIFO_FLUSHED); + TX_STATUS_FAIL(SMALL_CF_POLL); + TX_STATUS_FAIL(FW_DROP); + TX_STATUS_FAIL(STA_COLOR_MISMATCH); + } + + return "UNKNOWN"; + +#undef TX_STATUS_FAIL +#undef TX_STATUS_POSTPONE +} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +/** + * translate ucode response to mac80211 tx status control values + */ +static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->status.rates[0]; + + info->status.antenna = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + if (rate_n_flags & RATE_HT_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case RATE_MCS_CHAN_WIDTH_80: + r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + case RATE_MCS_CHAN_WIDTH_160: + r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; + break; + } + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= 
IEEE80211_TX_RC_SHORT_GI; + if (rate_n_flags & RATE_MCS_HT_MSK) { + r->flags |= IEEE80211_TX_RC_MCS; + r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + ieee80211_rate_set_vht( + r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1); + r->flags |= IEEE80211_TX_RC_VHT_MCS; + } else { + r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + info->band); + } +} + +static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct ieee80211_sta *sta; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); + int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); + u32 status = le16_to_cpu(tx_resp->status.status); + u16 ssn = iwl_mvm_get_scd_ssn(tx_resp); + struct iwl_mvm_sta *mvmsta; + struct sk_buff_head skbs; + u8 skb_freed = 0; + u16 next_reclaimed, seq_ctl; + + __skb_queue_head_init(&skbs); + + seq_ctl = le16_to_cpu(tx_resp->seq_ctl); + + /* we can free until ssn % q.n_bd not inclusive */ + iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); + + while (!skb_queue_empty(&skbs)) { + struct sk_buff *skb = __skb_dequeue(&skbs); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + skb_freed++; + + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + + /* inform mac80211 about what happened with the frame */ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + info->flags |= IEEE80211_TX_STAT_ACK; + break; + case TX_STATUS_FAIL_DEST_PS: + info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + break; + default: + break; + } + + info->status.rates[0].count = tx_resp->failure_frame + 1; + iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate), + info); + + /* Single frame failure in an AMPDU queue => send BAR */ + if (txq_id >= IWL_FIRST_AMPDU_QUEUE && + !(info->flags & IEEE80211_TX_STAT_ACK)) { + /* there must be only one skb in the skb_list */ + WARN_ON_ONCE(skb_freed > 1 || + !skb_queue_empty(&skbs)); + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + } + + /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ + if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { + struct ieee80211_hdr *hdr = (void *)skb->data; + seq_ctl = le16_to_cpu(hdr->seq_ctrl); + } + + ieee80211_tx_status_ni(mvm->hw, skb); + } + + if (txq_id >= IWL_FIRST_AMPDU_QUEUE) { + /* If this is an aggregation queue, we use the ssn since: + * ssn = wifi seq_num % 256. + * The seq_ctl is the sequence control of the packet to which + * this Tx response relates. But if there is a hole in the + * bitmap of the BA we received, this Tx response may allow to + * reclaim the hole and all the subsequent packets that were + * already acked. In that case, seq_ctl != ssn, and the next + * packet to be reclaimed will be ssn and not seq_ctl. In that + * case, several packets will be reclaimed even if + * frame_count = 1. + * + * The ssn is the index (% 256) of the latest packet that has + * treated (acked / dropped) + 1. 
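Editorial example of the distinction, with illustrative numbers: suppose this Tx response is for the frame with sequence number 5, while frames 6-8 were already acknowledged by an earlier block-ack. The scheduler's ssn has then advanced to 9, so even though frame_count == 1 this response effectively reclaims frames 5-8, and next_reclaimed must be taken from ssn (9) rather than from seq_ctl + 1 (which would give 6).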
+ */ + next_reclaimed = ssn; + } else { + /* The next packet to be reclaimed is the one after this one */ + next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10); + } + + IWL_DEBUG_TX_REPLY(mvm, + "TXQ %d status %s (0x%08x)\n\t\t\t\tinitial_rate 0x%x " + "retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", + txq_id, iwl_mvm_get_tx_fail_reason(status), + status, le32_to_cpu(tx_resp->initial_rate), + tx_resp->failure_frame, SEQ_TO_INDEX(sequence), + ssn, next_reclaimed, seq_ctl); + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + if (!IS_ERR_OR_NULL(sta)) { + mvmsta = (void *)sta->drv_priv; + + if (tid != IWL_TID_NON_QOS) { + struct iwl_mvm_tid_data *tid_data = + &mvmsta->tid_data[tid]; + + spin_lock_bh(&mvmsta->lock); + tid_data->next_reclaimed = next_reclaimed; + IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", + next_reclaimed); + iwl_mvm_check_ratid_empty(mvm, sta, tid); + spin_unlock_bh(&mvmsta->lock); + } + +#ifdef CONFIG_PM_SLEEP + mvmsta->last_seq_ctl = seq_ctl; +#endif + } else { + sta = NULL; + mvmsta = NULL; + } + + /* + * If the txq is not an AMPDU queue, there is no chance we freed + * several skbs. Check that out... + * If there are no pending frames for this STA, notify mac80211 that + * this station can go to sleep in its STA table. + */ + if (txq_id < IWL_FIRST_AMPDU_QUEUE && mvmsta && + !WARN_ON(skb_freed > 1) && + mvmsta->vif->type == NL80211_IFTYPE_AP && + atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { + ieee80211_sta_block_awake(mvm->hw, sta, false); + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } + + rcu_read_unlock(); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x +static const char *iwl_get_agg_tx_status(u16 status) +{ + switch (status & AGG_TX_STATE_STATUS_MSK) { + AGG_TX_STATE_(TRANSMITTED); + AGG_TX_STATE_(UNDERRUN); + AGG_TX_STATE_(BT_PRIO); + AGG_TX_STATE_(FEW_BYTES); + AGG_TX_STATE_(ABORT); + AGG_TX_STATE_(LAST_SENT_TTL); + AGG_TX_STATE_(LAST_SENT_TRY_CNT); + AGG_TX_STATE_(LAST_SENT_BT_KILL); + AGG_TX_STATE_(SCD_QUERY); + AGG_TX_STATE_(TEST_BAD_CRC32); + AGG_TX_STATE_(RESPONSE); + AGG_TX_STATE_(DUMP_TX); + AGG_TX_STATE_(DELAY_TX); + } + + return "UNKNOWN"; +} + +static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + struct agg_tx_status *frame_status = &tx_resp->status; + int i; + + for (i = 0; i < tx_resp->frame_count; i++) { + u16 fstatus = le16_to_cpu(frame_status[i].status); + + IWL_DEBUG_TX_REPLY(mvm, + "status %s (0x%04x), try-count (%d) seq (0x%x)\n", + iwl_get_agg_tx_status(fstatus), + fstatus & AGG_TX_STATE_STATUS_MSK, + (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> + AGG_TX_STATE_TRY_CNT_POS, + le16_to_cpu(frame_status[i].sequence)); + } +} +#else +static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); + int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + struct ieee80211_sta *sta; + + if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_FIRST_AMPDU_QUEUE)) + return; + + if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) + return; + + iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); + + rcu_read_lock(); + + sta = 
rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; + mvmsta->tid_data[tid].rate_n_flags = + le32_to_cpu(tx_resp->initial_rate); + } + + rcu_read_unlock(); +} + +int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + + if (tx_resp->frame_count == 1) + iwl_mvm_rx_tx_cmd_single(mvm, pkt); + else + iwl_mvm_rx_tx_cmd_agg(mvm, pkt); + + return 0; +} + +int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; + struct sk_buff_head reclaimed_skbs; + struct iwl_mvm_tid_data *tid_data; + struct ieee80211_tx_info *info; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + int sta_id, tid, freed; + + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); + + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); + + sta_id = ba_notif->sta_id; + tid = ba_notif->tid; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + /* Reclaiming frames for a station that has been deleted ? */ + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return 0; + } + + mvmsta = (void *)sta->drv_priv; + tid_data = &mvmsta->tid_data[tid]; + + if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d", + tid_data->txq_id, tid, scd_flow)) { + rcu_read_unlock(); + return 0; + } + + spin_lock_bh(&mvmsta->lock); + + __skb_queue_head_init(&reclaimed_skbs); + + /* + * Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). 
+ */ + iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn, + &reclaimed_skbs); + + IWL_DEBUG_TX_REPLY(mvm, + "BA_NOTIFICATION Received from %pM, sta_id = %d\n", + (u8 *)&ba_notif->sta_addr_lo32, + ba_notif->sta_id); + IWL_DEBUG_TX_REPLY(mvm, + "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", + ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), + (unsigned long long)le64_to_cpu(ba_notif->bitmap), + scd_flow, ba_resp_scd_ssn, ba_notif->txed, + ba_notif->txed_2_done); + + tid_data->next_reclaimed = ba_resp_scd_ssn; + + iwl_mvm_check_ratid_empty(mvm, sta, tid); + + freed = 0; + + skb_queue_walk(&reclaimed_skbs, skb) { + hdr = (struct ieee80211_hdr *)skb->data; + + if (ieee80211_is_data_qos(hdr->frame_control)) + freed++; + else + WARN_ON_ONCE(1); + + info = IEEE80211_SKB_CB(skb); + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + + if (freed == 1) { + /* this is the first skb we deliver in this batch */ + /* put the rate scaling data there */ + info = IEEE80211_SKB_CB(skb); + memset(&info->status, 0, sizeof(info->status)); + info->flags |= IEEE80211_TX_STAT_ACK; + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = ba_notif->txed_2_done; + info->status.ampdu_len = ba_notif->txed; + iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags, + info); + } + } + + spin_unlock_bh(&mvmsta->lock); + + rcu_read_unlock(); + + while (!skb_queue_empty(&reclaimed_skbs)) { + skb = __skb_dequeue(&reclaimed_skbs); + ieee80211_tx_status_ni(mvm->hw, skb); + } + + return 0; +} + +int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync) +{ + int ret; + struct iwl_tx_path_flush_cmd flush_cmd = { + .queues_ctl = cpu_to_le32(tfd_msk), + .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), + }; + + u32 flags = sync ? CMD_SYNC : CMD_ASYNC; + + ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, + sizeof(flush_cmd), &flush_cmd); + if (ret) + IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c new file mode 100644 index 000000000000..000e842c2edd --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -0,0 +1,472 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include <net/mac80211.h> + +#include "iwl-debug.h" +#include "iwl-io.h" + +#include "mvm.h" +#include "fw-api-rs.h" + +/* + * Will return 0 even if the cmd failed when RFKILL is asserted unless + * CMD_WANT_SKB is set in cmd->flags. + */ +int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) +{ + int ret; + + /* + * Synchronous commands from this op-mode must hold + * the mutex, this ensures we don't try to send two + * (or more) synchronous commands at a time. + */ + if (!(cmd->flags & CMD_ASYNC)) + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_send_cmd(mvm->trans, cmd); + + /* + * If the caller wants the SKB, then don't hide any problems, the + * caller might access the response buffer which will be NULL if + * the command failed. + */ + if (cmd->flags & CMD_WANT_SKB) + return ret; + + /* Silently ignore failures if RFKILL is asserted */ + if (!ret || ret == -ERFKILL) + return 0; + return ret; +} + +int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id, + u32 flags, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = { len, }, + .data = { data, }, + .flags = flags, + }; + + return iwl_mvm_send_cmd(mvm, &cmd); +} + +/* + * We assume that the caller set the status to the sucess value + */ +int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, + u32 *status) +{ + struct iwl_rx_packet *pkt; + struct iwl_cmd_response *resp; + int ret, resp_len; + + lockdep_assert_held(&mvm->mutex); + + /* + * Only synchronous commands can wait for status, + * we use WANT_SKB so the caller can't. 
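Editorial sketch of the calling convention for the *_status helpers below; the command id, payload and success value are placeholders rather than real API symbols:

static int example_cmd_with_status(struct iwl_mvm *mvm, u8 cmd_id,
				   const void *payload, u16 len, u32 success)
{
	u32 status = success;	/* caller pre-sets the success value */
	int ret;

	ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, len, payload, &status);
	if (ret)
		return ret;		/* transport error */
	if (status != success)
		return -EIO;		/* fw ran the command but reported a failure */
	return 0;
}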
+ */ + if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), + "cmd flags %x", cmd->flags)) + return -EINVAL; + + cmd->flags |= CMD_SYNC | CMD_WANT_SKB; + + ret = iwl_trans_send_cmd(mvm->trans, cmd); + if (ret == -ERFKILL) { + /* + * The command failed because of RFKILL, don't update + * the status, leave it as success and return 0. + */ + return 0; + } else if (ret) { + return ret; + } + + pkt = cmd->resp_pkt; + /* Can happen if RFKILL is asserted */ + if (!pkt) { + ret = 0; + goto out_free_resp; + } + + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + ret = -EIO; + goto out_free_resp; + } + + resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) { + ret = -EIO; + goto out_free_resp; + } + + resp = (void *)pkt->data; + *status = le32_to_cpu(resp->status); + out_free_resp: + iwl_free_resp(cmd); + return ret; +} + +/* + * We assume that the caller set the status to the sucess value + */ +int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len, + const void *data, u32 *status) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = { len, }, + .data = { data, }, + }; + + return iwl_mvm_send_cmd_status(mvm, &cmd, status); +} + +#define IWL_DECLARE_RATE_INFO(r) \ + [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP + +/* + * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP + */ +static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1), + IWL_DECLARE_RATE_INFO(2), + IWL_DECLARE_RATE_INFO(5), + IWL_DECLARE_RATE_INFO(11), + IWL_DECLARE_RATE_INFO(6), + IWL_DECLARE_RATE_INFO(9), + IWL_DECLARE_RATE_INFO(12), + IWL_DECLARE_RATE_INFO(18), + IWL_DECLARE_RATE_INFO(24), + IWL_DECLARE_RATE_INFO(36), + IWL_DECLARE_RATE_INFO(48), + IWL_DECLARE_RATE_INFO(54), +}; + +int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, + enum ieee80211_band band) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; + int idx; + int band_offset = 0; + + /* Legacy rate format, search for match in table */ + if (band == IEEE80211_BAND_5GHZ) + band_offset = IWL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) + if (fw_rate_idx_to_plcp[idx] == rate) + return idx - band_offset; + + return -1; +} + +u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) +{ + /* Get PLCP rate for tx_cmd->rate_n_flags */ + return fw_rate_idx_to_plcp[rate_idx]; +} + +int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_error_resp *err_resp = (void *)pkt->data; + + IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", + le32_to_cpu(err_resp->error_type), err_resp->cmd_id); + IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", + le16_to_cpu(err_resp->bad_cmd_seq_num), + le32_to_cpu(err_resp->error_service)); + IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", + le64_to_cpu(err_resp->timestamp)); + return 0; +} + +/* + * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h. + * The parameter should also be a combination of ANT_[ABC]. + */ +u8 first_antenna(u8 mask) +{ + BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ + WARN_ON_ONCE(!mask); /* ffs will return 0 if mask is zeroed */ + return (u8)(BIT(ffs(mask))); +} + +/* + * Toggles between TX antennas to send the probe request on. + * Receives the bitmask of valid TX antennas and the *index* used + * for the last TX, and returns the next valid *index* to use. 
+ * In order to set it in the tx_cmd, must do BIT(idx). + */ +u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) +{ + u8 ind = last_idx; + int i; + + for (i = 0; i < RATE_MCS_ANT_NUM; i++) { + ind = (ind + 1) % RATE_MCS_ANT_NUM; + if (valid & BIT(ind)) + return ind; + } + + WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); + return last_idx; +} + +static struct { + char *name; + u8 num; +} advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +static const char *desc_lookup(u32 num) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) + if (advanced_lookup[i].num == num) + return advanced_lookup[i].name; + + /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ + return advanced_lookup[i].name; +} + +/* + * Note: This structure is read from the device with IO accesses, + * and the reading already does the endian conversion. As it is + * read with u32-sized accesses, any members with a different size + * need to be ordered correctly though! + */ +struct iwl_error_event_table { + u32 valid; /* (nonzero) valid, (0) log is empty */ + u32 error_id; /* type of error */ + u32 pc; /* program counter */ + u32 blink1; /* branch link */ + u32 blink2; /* branch link */ + u32 ilink1; /* interrupt link */ + u32 ilink2; /* interrupt link */ + u32 data1; /* error-specific data */ + u32 data2; /* error-specific data */ + u32 data3; /* error-specific data */ + u32 bcon_time; /* beacon timer */ + u32 tsf_low; /* network timestamp function timer */ + u32 tsf_hi; /* network timestamp function timer */ + u32 gp1; /* GP1 timer register */ + u32 gp2; /* GP2 timer register */ + u32 gp3; /* GP3 timer register */ + u32 ucode_ver; /* uCode version */ + u32 hw_ver; /* HW Silicon version */ + u32 brd_ver; /* HW board version */ + u32 log_pc; /* log program counter */ + u32 frame_ptr; /* frame pointer */ + u32 stack_ptr; /* stack pointer */ + u32 hcmd; /* last host command header */ + u32 isr0; /* isr status register LMPM_NIC_ISR0: + * rxtx_flag */ + u32 isr1; /* isr status register LMPM_NIC_ISR1: + * host_flag */ + u32 isr2; /* isr status register LMPM_NIC_ISR2: + * enc_flag */ + u32 isr3; /* isr status register LMPM_NIC_ISR3: + * time_flag */ + u32 isr4; /* isr status register LMPM_NIC_ISR4: + * wico interrupt */ + u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ + u32 wait_event; /* wait event() caller address */ + u32 l2p_control; /* L2pControlField */ + u32 l2p_duration; /* L2pDurationField */ + u32 l2p_mhvalid; /* L2pMhValidBits */ + u32 l2p_addr_match; /* L2pAddrMatchStat */ + u32 lmpm_pmg_sel; /* indicate which clocks are turned on + * (LMPM_PMG_SEL) */ + u32 u_timestamp; /* indicate when the date and time of the + * compilation */ + u32 flow_handler; /* FH read/write pointers, RX credit */ +} __packed; + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +{ + struct iwl_trans *trans = mvm->trans; + struct 
iwl_error_event_table table; + u32 base; + + base = mvm->error_event_table; + if (mvm->cur_ucode == IWL_UCODE_INIT) { + if (!base) + base = mvm->fw->init_errlog_ptr; + } else { + if (!base) + base = mvm->fw->inst_errlog_ptr; + } + + if (base < 0x800000 || base >= 0x80C000) { + IWL_ERR(mvm, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, + (mvm->cur_ucode == IWL_UCODE_INIT) + ? "Init" : "RT"); + return; + } + + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { + IWL_ERR(trans, "Start IWL Error Log Dump:\n"); + IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", + mvm->status, table.valid); + } + + trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, + table.data1, table.data2, table.data3, + table.blink1, table.blink2, table.ilink1, + table.ilink2, table.bcon_time, table.gp1, + table.gp2, table.gp3, table.ucode_ver, + table.hw_ver, table.brd_ver); + IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); + IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); + IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); + IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); + IWL_ERR(mvm, "0x%08X | data1\n", table.data1); + IWL_ERR(mvm, "0x%08X | data2\n", table.data2); + IWL_ERR(mvm, "0x%08X | data3\n", table.data3); + IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); + IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); + IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); + IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); + IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); + IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver); + IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); + IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); + IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); + IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); + IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); + IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); + IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); + IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); + IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); + IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); + IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); + IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); + IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); + IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); + IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); + IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); +} + +/** + * iwl_mvm_send_lq_cmd() - Send link quality command + * @init: This command is sent as part of station initialization right + * after station has been added. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. 
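As an aside, a minimal sketch of how an op-mode caller might use the link-quality helper documented above. The wrapper name example_set_link_quality is hypothetical and not part of this patch; it assumes the caller already holds mvm->mutex and has a fully populated struct iwl_lq_cmd, matching the constraints enforced by iwl_mvm_send_cmd() and the WARN_ONs in the helper itself:

	static int example_set_link_quality(struct iwl_mvm *mvm,
					    struct iwl_lq_cmd *lq)
	{
		/* synchronous send: the op-mode mutex must be held */
		lockdep_assert_held(&mvm->mutex);

		/* init=false: the station already exists, no init callback needed */
		return iwl_mvm_send_lq_cmd(mvm, lq, CMD_SYNC, false);
	}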
+ */ +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + u8 flags, bool init) +{ + struct iwl_host_cmd cmd = { + .id = LQ_CMD, + .len = { sizeof(struct iwl_lq_cmd), }, + .flags = flags, + .data = { lq, }, + }; + + if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + if (WARN_ON(init && (cmd.flags & CMD_ASYNC))) + return -EINVAL; + + return iwl_mvm_send_cmd(mvm, &cmd); +} diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c index f8620ecae6b4..ff3389757281 100644 --- a/drivers/net/wireless/iwlwifi/pcie/1000.c +++ b/drivers/net/wireless/iwlwifi/pcie/1000.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c index 244019cec3e1..e7de33128b16 100644 --- a/drivers/net/wireless/iwlwifi/pcie/2000.c +++ b/drivers/net/wireless/iwlwifi/pcie/2000.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c index 83ca40321ff1..5096f7c96ab6 100644 --- a/drivers/net/wireless/iwlwifi/pcie/5000.c +++ b/drivers/net/wireless/iwlwifi/pcie/5000.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c index d4df976d4709..801ff49796dd 100644 --- a/drivers/net/wireless/iwlwifi/pcie/6000.c +++ b/drivers/net/wireless/iwlwifi/pcie/6000.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/pcie/7000.c b/drivers/net/wireless/iwlwifi/pcie/7000.c new file mode 100644 index 000000000000..6e35b2b72332 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/pcie/7000.c @@ -0,0 +1,111 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-agn-hw.h" +#include "cfg.h" + +/* Highest firmware API version supported */ +#define IWL7260_UCODE_API_MAX 6 +#define IWL3160_UCODE_API_MAX 6 + +/* Oldest version we won't warn about */ +#define IWL7260_UCODE_API_OK 6 +#define IWL3160_UCODE_API_OK 6 + +/* Lowest firmware API version supported */ +#define IWL7260_UCODE_API_MIN 6 +#define IWL3160_UCODE_API_MIN 6 + +/* NVM versions */ +#define IWL7260_NVM_VERSION 0x0a1d +#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL3160_NVM_VERSION 0x709 +#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ + +#define IWL7260_FW_PRE "iwlwifi-7260-" +#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" + +#define IWL3160_FW_PRE "iwlwifi-3160-" +#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" + +static const struct iwl_base_params iwl7000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .pll_cfg_val = 0, + .shadow_ram_support = true, + .led_compensation = 57, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ +}; + +static const struct iwl_ht_params iwl7000_ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + +#define IWL_DEVICE_7000 \ + .ucode_api_max = IWL7260_UCODE_API_MAX, \ + .ucode_api_ok = IWL7260_UCODE_API_OK, \ + .ucode_api_min = IWL7260_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_7000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl7000_base_params, \ + /* TODO: .bt_params? 
*/ \ + .need_temp_offset_calib = true, \ + .led_mode = IWL_LED_RF_STATE, \ + .adv_pm = true \ + + +const struct iwl_cfg iwl7260_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl3160_ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, +}; + +MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h index 82152311d73b..c6f8e83c3551 100644 --- a/drivers/net/wireless/iwlwifi/pcie/cfg.h +++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -109,5 +109,7 @@ extern const struct iwl_cfg iwl6035_2agn_cfg; extern const struct iwl_cfg iwl105_bgn_cfg; extern const struct iwl_cfg iwl105_bgn_d_cfg; extern const struct iwl_cfg iwl135_bgn_cfg; +extern const struct iwl_cfg iwl7260_2ac_cfg; +extern const struct iwl_cfg iwl3160_ac_cfg; #endif /* __iwl_pci_h__ */ diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index c2e141af353c..7bc0fb9128dd 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -255,6 +255,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, +/* 7000 Series */ + {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)}, + {0} }; MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index d91d2e8c62f5..aa2a39a637dd 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. 
All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -222,8 +222,6 @@ struct iwl_txq { * @rx_replenish: work that will be called when buffers need to be allocated * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area - * @irq - the irq number for the device - * @irq_requested: true when the irq has been requested * @scd_base_addr: scheduler sram base address in SRAM * @scd_bc_tbls: pointer to the byte count table of the scheduler * @kw: keep warm address @@ -234,8 +232,10 @@ struct iwl_txq { * @status - transport specific status flags * @cmd_queue - command queue number * @rx_buf_size_8k: 8 kB RX buffer size + * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @rx_page_order: page order for receive buffer size * @wd_timeout: queue watchdog timeout (jiffies) + * @reg_lock: protect hw register access */ struct iwl_trans_pcie { struct iwl_rxq rxq; @@ -249,11 +249,8 @@ struct iwl_trans_pcie { int ict_index; u32 inta; bool use_ict; - bool irq_requested; - struct tasklet_struct irq_tasklet; struct isr_statistics isr_stats; - unsigned int irq; spinlock_t irq_lock; u32 inta_mask; u32 scd_base_addr; @@ -279,12 +276,16 @@ struct iwl_trans_pcie { u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; bool rx_buf_size_8k; + bool bc_table_dword; u32 rx_page_order; const char **command_names; /* queue watchdog */ unsigned long wd_timeout; + + /*protect hw register */ + spinlock_t reg_lock; }; /** @@ -328,7 +329,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); * RX ******************************************************/ int iwl_pcie_rx_init(struct iwl_trans *trans); -void iwl_pcie_tasklet(struct iwl_trans *trans); +irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); @@ -359,6 +360,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, int handler_status); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); +void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); + /***************************************************** * Error handling ******************************************************/ diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 8389cd38338b..b0ae06d2456f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -81,10 +81,10 @@ * 'processed' and 'read' driver indexes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free - * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ - * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there - * were enough free buffers and RX_STALLED is set it is cleared. 
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the + * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, + * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. + * If there were enough free buffers and RX_STALLED is set it is cleared. * * * Driver sequence: @@ -214,9 +214,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) /* * If the device isn't enabled - not need to try to add buffers... * This can happen when we stop the device and still have an interrupt - * pending. We stop the APM before we sync the interrupts / tasklets - * because we have to (see comment there). On the other hand, since - * the APM is stopped, we cannot access the HW (in particular not prph). + * pending. We stop the APM before we sync the interrupts because we + * have to (see comment there). On the other hand, since the APM is + * stopped, we cannot access the HW (in particular not prph). * So don't try to restock if the APM has been already stopped. */ if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) @@ -436,7 +436,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) err_rb_stts: dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); - memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); + rxq->bd_dma = 0; rxq->bd = NULL; err_bd: return -ENOMEM; @@ -455,6 +455,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) /* Stop Rx DMA */ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + /* reset and flush pointers */ + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); + iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0); /* Reset driver's Rx queue write index */ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); @@ -491,7 +495,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - int i, err; unsigned long flags; @@ -518,6 +521,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) rxq->read = rxq->write = 0; rxq->write_actual = 0; rxq->free_count = 0; + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock_irqrestore(&rxq->lock, flags); iwl_pcie_rx_replenish(trans); @@ -545,13 +549,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } + cancel_work_sync(&trans_pcie->rx_replenish); + spin_lock_irqsave(&rxq->lock, flags); iwl_pcie_rxq_free_rbs(trans); spin_unlock_irqrestore(&rxq->lock, flags); dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); - memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); + rxq->bd_dma = 0; rxq->bd = NULL; if (rxq->rb_stts) @@ -560,7 +566,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts, rxq->rb_stts_dma); else IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); - memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); + rxq->rb_stts_dma = 0; rxq->rb_stts = NULL; } @@ -588,6 +594,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, int index, cmd_index, err, len; struct iwl_rx_cmd_buffer rxcb = { ._offset = offset, + ._rx_page_order = trans_pcie->rx_page_order, ._page = rxb->page, ._page_stolen = false, .truesize = max_len, @@ -789,11 +796,14 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); wake_up(&trans_pcie->wait_command_queue); + local_bh_disable(); iwl_op_mode_nic_error(trans->op_mode); + local_bh_enable(); } 
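The pcie/rx.c changes that follow convert RX processing from a tasklet to a threaded interrupt handler: the hard IRQ only acknowledges and masks the interrupt and returns IRQ_WAKE_THREAD, and the heavy work runs in the IRQ thread, which may sleep. A minimal, generic sketch of that pattern (the demo_* names are illustrative, not the driver's own):

	#include <linux/interrupt.h>

	/* hard-IRQ context: ack/mask the device, then defer to the thread */
	static irqreturn_t demo_hard_isr(int irq, void *dev_id)
	{
		/* read and mask device interrupt causes here ... */
		return IRQ_WAKE_THREAD;	/* schedules demo_thread_fn */
	}

	/* runs in a kernel thread; may sleep, take mutexes, allocate, etc. */
	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		/* process RX buffers, then re-enable device interrupts ... */
		return IRQ_HANDLED;
	}

	static int demo_setup_irq(int irq, void *dev_id)
	{
		return request_threaded_irq(irq, demo_hard_isr, demo_thread_fn,
					    IRQF_SHARED, "demo", dev_id);
	}

This mirrors the request_threaded_irq() call added to iwl_trans_pcie_alloc() later in this patch, where iwl_pcie_isr_ict is the hard handler and iwl_pcie_irq_handler the thread function.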
-void iwl_pcie_tasklet(struct iwl_trans *trans) +irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) { + struct iwl_trans *trans = dev_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 inta = 0; @@ -804,6 +814,8 @@ void iwl_pcie_tasklet(struct iwl_trans *trans) u32 inta_mask; #endif + lock_map_acquire(&trans->sync_cmd_lockdep_map); + spin_lock_irqsave(&trans_pcie->irq_lock, flags); /* Ack/clear/reset pending uCode interrupts. @@ -848,7 +860,7 @@ void iwl_pcie_tasklet(struct iwl_trans *trans) handled |= CSR_INT_BIT_HW_ERR; - return; + goto out; } #ifdef CONFIG_IWLWIFI_DEBUG @@ -998,6 +1010,10 @@ void iwl_pcie_tasklet(struct iwl_trans *trans) /* Re-enable RF_KILL if it occurred */ else if (handled & CSR_INT_BIT_RF_KILL) iwl_enable_rfkill_int(trans); + +out: + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_HANDLED; } /****************************************************************************** @@ -1120,7 +1136,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) /* Disable (but don't clear!) interrupts here to avoid * back-to-back ISRs and sporadic interrupts from our NIC. - * If we have something to service, the tasklet will re-enable ints. + * If we have something to service, the irq thread will re-enable ints. * If we *don't* have something, we'll re-enable before leaving here. */ inta_mask = iwl_read32(trans, CSR_INT_MASK); iwl_write32(trans, CSR_INT_MASK, 0x00000000); @@ -1160,9 +1176,9 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) #endif trans_pcie->inta |= inta; - /* iwl_pcie_tasklet() will service interrupts and re-enable them */ + /* the thread will service interrupts and re-enable them */ if (likely(inta)) - tasklet_schedule(&trans_pcie->irq_tasklet); + return IRQ_WAKE_THREAD; else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && !trans_pcie->inta) iwl_enable_interrupts(trans); @@ -1270,9 +1286,10 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) trans_pcie->inta |= inta; /* iwl_pcie_tasklet() will service interrupts and re-enable them */ - if (likely(inta)) - tasklet_schedule(&trans_pcie->irq_tasklet); - else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && + if (likely(inta)) { + spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); + return IRQ_WAKE_THREAD; + } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && !trans_pcie->inta) { /* Allow interrupt if was disabled by this handler and * no tasklet was schedules, We should not enable interrupt, diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 35708b959ad6..17bedc50e753 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -75,21 +75,43 @@ #include "iwl-agn-hw.h" #include "internal.h" -static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans) +static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, + u32 reg, u32 mask, u32 value) { -/* - * (for documentation purposes) - * to set power to V_AUX, do: + u32 v; - if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) - iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VAUX, - ~APMG_PS_CTRL_MSK_PWR_SRC); - */ +#ifdef CONFIG_IWLWIFI_DEBUG + WARN_ON_ONCE(value & ~mask); +#endif - iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, - ~APMG_PS_CTRL_MSK_PWR_SRC); + v = iwl_read32(trans, reg); + v &= ~mask; + v |= value; + iwl_write32(trans, reg, v); +} + +static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, + u32 reg, u32 mask) +{ + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); +} + +static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, + u32 reg, u32 mask) +{ + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); +} + +static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) +{ + if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + else + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); } /* PCI registers */ @@ -259,7 +281,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); - iwl_pcie_set_pwr_vmain(trans); + iwl_pcie_set_pwr(trans, false); iwl_op_mode_nic_config(trans->op_mode); @@ -435,7 +457,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, } static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, - const struct fw_img *fw) + const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; @@ -454,7 +476,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); - if (hw_rfkill) + if (hw_rfkill && !run_in_rfkill) return -ERFKILL; iwl_write32(trans, CSR_INT, 0xFFFFFFFF); @@ -534,12 +556,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) iwl_enable_rfkill_int(trans); - /* wait to make sure we flush pending tasklet*/ - synchronize_irq(trans_pcie->irq); - tasklet_kill(&trans_pcie->irq_tasklet); - - cancel_work_sync(&trans_pcie->rx_replenish); - /* stop and reset the on-board processor */ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); @@ -551,46 +567,87 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) clear_bit(STATUS_RFKILL, &trans_pcie->status); } -static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans) { /* let the ucode operate on its own */ iwl_write32(trans, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); iwl_disable_interrupts(trans); + iwl_pcie_disable_ict(trans); + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * reset TX queues -- some of their registers reset during S3 + * so if we don't reset everything here the D3 image would try + * to execute some invalid 
memory upon resume + */ + iwl_trans_pcie_tx_reset(trans); + + iwl_pcie_set_pwr(trans, true); } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) +static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int err; - bool hw_rfkill; + u32 val; + int ret; - trans_pcie->inta_mask = CSR_INI_SET_MASK; + iwl_pcie_set_pwr(trans, false); - if (!trans_pcie->irq_requested) { - tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long)) - iwl_pcie_tasklet, (unsigned long)trans); + val = iwl_read32(trans, CSR_RESET); + if (val & CSR_RESET_REG_FLAG_NEVO_RESET) { + *status = IWL_D3_STATUS_RESET; + return 0; + } - iwl_pcie_alloc_ict(trans); + /* + * Also enables interrupts - none will happen as the device doesn't + * know we're waking it up, only when the opmode actually tells it + * after this call. + */ + iwl_pcie_reset_ict(trans); - err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict, - IRQF_SHARED, DRV_NAME, trans); - if (err) { - IWL_ERR(trans, "Error allocating IRQ %d\n", - trans_pcie->irq); - goto error; - } + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + 25000); + if (ret) { + IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); + return ret; + } - trans_pcie->irq_requested = true; + iwl_trans_pcie_tx_reset(trans); + + ret = iwl_pcie_rx_init(trans); + if (ret) { + IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); + return ret; } + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + *status = IWL_D3_STATUS_ALIVE; + return 0; +} + +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) +{ + bool hw_rfkill; + int err; + err = iwl_pcie_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); - goto err_free_irq; + return err; } iwl_pcie_apm_init(trans); @@ -601,15 +658,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); - return err; - -err_free_irq: - trans_pcie->irq_requested = false; - free_irq(trans_pcie->irq, trans); -error: - iwl_pcie_free_ict(trans); - tasklet_kill(&trans_pcie->irq_tasklet); - return err; + return 0; } static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, @@ -703,19 +752,20 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, msecs_to_jiffies(trans_cfg->queue_watchdog_timeout); trans_pcie->command_names = trans_cfg->command_names; + trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; } void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); - if (trans_pcie->irq_requested == true) { - free_irq(trans_pcie->irq, trans); - iwl_pcie_free_ict(trans); - } + free_irq(trans_pcie->pci_dev->irq, trans); + iwl_pcie_free_ict(trans); pci_disable_msi(trans_pcie->pci_dev); iounmap(trans_pcie->hw_base); @@ -751,13 +801,126 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans) hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); - if (!hw_rfkill) - iwl_enable_interrupts(trans); - return 0; } #endif /* CONFIG_PM_SLEEP */ +static bool 
iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, + unsigned long *flags) +{ + int ret; + struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans); + spin_lock_irqsave(&pcie_trans->reg_lock, *flags); + + /* this bit wakes up the NIC */ + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * 3945 and 4965 have volatile SRAM, and must save/restore contents + * to/from host DRAM when sleeping/waking for power-saving. + * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP check is a + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). + * + * 5000 series and later (including 1000 series) have non-volatile SRAM, + * and do not save/restore SRAM when power cycling. + */ + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + if (unlikely(ret < 0)) { + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); + if (!silent) { + u32 val = iwl_read32(trans, CSR_GP_CNTRL); + WARN_ONCE(1, + "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", + val); + spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags); + return false; + } + } + + /* + * Fool sparse by faking we release the lock - sparse will + * track nic_access anyway. + */ + __release(&pcie_trans->reg_lock); + return true; +} + +static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, + unsigned long *flags) +{ + struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&pcie_trans->reg_lock); + + /* + * Fool sparse by faking we acquiring the lock - sparse will + * track nic_access anyway. + */ + __acquire(&pcie_trans->reg_lock); + + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + /* + * Above we read the CSR_GP_CNTRL register, which will flush + * any previous writes, but we need the write that clears the + * MAC_ACCESS_REQ bit to be performed before any other writes + * scheduled on different CPUs (after we drop reg_lock). 
+ */ + mmiowb(); + spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags); +} + +static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + unsigned long flags; + int offs, ret = 0; + u32 *vals = buf; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); + for (offs = 0; offs < dwords; offs++) + vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + iwl_trans_release_nic_access(trans, &flags); + } else { + ret = -EBUSY; + } + return ret; +} + +static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + unsigned long flags; + int offs, ret = 0; + u32 *vals = buf; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); + for (offs = 0; offs < dwords; offs++) + iwl_write32(trans, HBUS_TARG_MEM_WDAT, + vals ? vals[offs] : 0); + iwl_trans_release_nic_access(trans, &flags); + } else { + ret = -EBUSY; + } + return ret; +} + #define IWL_FLUSH_WAIT_MS 2000 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) @@ -767,6 +930,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) struct iwl_queue *q; int cnt; unsigned long now = jiffies; + u32 scd_sram_addr; + u8 buf[16]; int ret = 0; /* waiting for all the tx frames complete might take a while */ @@ -780,14 +945,64 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) msleep(1); if (q->read_ptr != q->write_ptr) { - IWL_ERR(trans, "fail to flush all tx fifo queues\n"); + IWL_ERR(trans, + "fail to flush all tx fifo queues Q %d\n", cnt); ret = -ETIMEDOUT; break; } } + + if (!ret) + return 0; + + IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", + txq->q.read_ptr, txq->q.write_ptr); + + scd_sram_addr = trans_pcie->scd_base_addr + + SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); + iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); + + iwl_print_hex_error(trans, buf, sizeof(buf)); + + for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++) + IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt, + iwl_read_direct32(trans, FH_TX_TRB_REG(cnt))); + + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt)); + u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); + u32 tbl_dw = + iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(cnt)); + + if (cnt & 0x1) + tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; + else + tbl_dw = tbl_dw & 0x0000FFFF; + + IWL_ERR(trans, + "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", + cnt, active ? 
"" : "in", fifo, tbl_dw, + iwl_read_prph(trans, + SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); + } + return ret; } +static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); +} + static const char *get_fh_string(int cmd) { #define IWL_CMD(x) case x: return #x @@ -1212,7 +1427,8 @@ static const struct iwl_trans_ops trans_ops_pcie = { .start_fw = iwl_trans_pcie_start_fw, .stop_device = iwl_trans_pcie_stop_device, - .wowlan_suspend = iwl_trans_pcie_wowlan_suspend, + .d3_suspend = iwl_trans_pcie_d3_suspend, + .d3_resume = iwl_trans_pcie_d3_resume, .send_cmd = iwl_trans_pcie_send_hcmd, @@ -1235,8 +1451,13 @@ static const struct iwl_trans_ops trans_ops_pcie = { .read32 = iwl_trans_pcie_read32, .read_prph = iwl_trans_pcie_read_prph, .write_prph = iwl_trans_pcie_write_prph, + .read_mem = iwl_trans_pcie_read_mem, + .write_mem = iwl_trans_pcie_write_mem, .configure = iwl_trans_pcie_configure, .set_pmi = iwl_trans_pcie_set_pmi, + .grab_nic_access = iwl_trans_pcie_grab_nic_access, + .release_nic_access = iwl_trans_pcie_release_nic_access, + .set_bits_mask = iwl_trans_pcie_set_bits_mask, }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, @@ -1258,8 +1479,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans->ops = &trans_ops_pcie; trans->cfg = cfg; + trans_lockdep_init(trans); trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); + spin_lock_init(&trans_pcie->reg_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); /* W/A - seems to solve weird behavior. We need to remove this if we @@ -1318,7 +1541,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } trans->dev = &pdev->dev; - trans_pcie->irq = pdev->irq; trans_pcie->pci_dev = pdev; trans->hw_rev = iwl_read32(trans, CSR_HW_REV); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; @@ -1327,7 +1549,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - spin_lock_init(&trans->reg_lock); snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); @@ -1344,8 +1565,24 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (!trans->dev_cmd_pool) goto out_pci_disable_msi; + trans_pcie->inta_mask = CSR_INI_SET_MASK; + + if (iwl_pcie_alloc_ict(trans)) + goto out_free_cmd_pool; + + if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict, + iwl_pcie_irq_handler, + IRQF_SHARED, DRV_NAME, trans)) { + IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); + goto out_free_ict; + } + return trans; +out_free_ict: + iwl_pcie_free_ict(trans); +out_free_cmd_pool: + kmem_cache_destroy(trans->dev_cmd_pool); out_pci_disable_msi: pci_disable_msi(pdev); out_pci_release_regions: diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 6c5b867c353a..8e9e3212fe78 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -160,7 +160,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", txq->q.read_ptr, txq->q.write_ptr); - iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); + iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); iwl_print_hex_error(trans, buf, sizeof(buf)); @@ -173,9 +173,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); u32 tbl_dw = - iwl_read_targ_mem(trans, - trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(i)); + iwl_trans_read_mem32(trans, + trans_pcie->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(i)); if (i & 0x1) tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; @@ -237,7 +237,10 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, break; } - bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); + if (trans_pcie->bc_table_dword) + len = DIV_ROUND_UP(len, 4); + + bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; @@ -306,6 +309,9 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) return; } + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, + txq->q.write_ptr); + iwl_write_direct32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); @@ -612,7 +618,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) if (txq->q.n_bd) { dma_free_coherent(dev, sizeof(struct iwl_tfd) * txq->q.n_bd, txq->tfds, txq->q.dma_addr); - memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); + txq->q.dma_addr = 0; } kfree(txq->entries); @@ -638,9 +644,11 @@ static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask) void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 a; + int nq = trans->cfg->base_params->num_of_queues; int chan; u32 reg_val; + int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - + SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); /* make sure all queue are not stopped/used */ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); @@ -652,20 +660,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) WARN_ON(scd_base_addr != 0 && scd_base_addr != trans_pcie->scd_base_addr); - a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; - /* reset conext data memory */ - for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; - a += 4) - iwl_write_targ_mem(trans, a, 0); - /* reset tx status memory */ - for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; - a += 4) - iwl_write_targ_mem(trans, a, 0); - for (; a < trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE( - trans->cfg->base_params->num_of_queues); - a += 4) - iwl_write_targ_mem(trans, a, 0); + /* reset context data, TX status and translation data */ + iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_MEM_LOWER_BOUND, + NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, trans_pcie->scd_bc_tbls.dma >> 10); @@ -697,6 +695,29 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } +void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = 
IWL_TRANS_GET_PCIE_TRANS(trans); + int txq_id; + + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + txq_id++) { + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + + iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + iwl_pcie_txq_unmap(trans, txq_id); + txq->q.read_ptr = 0; + txq->q.write_ptr = 0; + } + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, + trans_pcie->kw.dma >> 4); + + iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr); +} + /* * iwl_pcie_tx_stop - Stop all Tx DMA channels */ @@ -905,7 +926,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (WARN_ON(txq_id == trans_pcie->cmd_queue)) return; - spin_lock(&txq->lock); + spin_lock_bh(&txq->lock); if (txq->q.read_ptr == tfd_num) goto out; @@ -949,7 +970,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (iwl_queue_space(&txq->q) > txq->q.low_mark) iwl_wake_queue(trans, txq); out: - spin_unlock(&txq->lock); + spin_unlock_bh(&txq->lock); } /* @@ -1002,14 +1023,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, tbl_dw_addr = trans_pcie->scd_base_addr + SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); - tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr); + tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); if (txq_id & 0x1) tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); else tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); - iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw); + iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); return 0; } @@ -1068,9 +1089,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); /* Set up Tx window size and frame limit for this queue */ - iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + + iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); - iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + + iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | @@ -1101,8 +1122,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) iwl_pcie_txq_set_inactive(trans, txq_id); - _iwl_write_targ_mem_dwords(trans, stts_addr, - zero_val, ARRAY_SIZE(zero_val)); + iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, + ARRAY_SIZE(zero_val)); iwl_pcie_txq_unmap(trans, txq_id); @@ -1350,7 +1371,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, return; } - spin_lock(&txq->lock); + spin_lock_bh(&txq->lock); cmd_index = get_cmd_index(&txq->q, index); cmd = txq->entries[cmd_index].cmd; @@ -1384,7 +1405,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, meta->flags = 0; - spin_unlock(&txq->lock); + spin_unlock_bh(&txq->lock); } #define HOST_COMPLETE_TIMEOUT (2 * HZ) @@ -1642,10 +1663,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); - IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n", - le16_to_cpu(dev_cmd->hdr.sequence)); - IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); - /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 
ec6d5d6b452e..116f4aba08d6 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -657,7 +657,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, capa, intvl, ie, ielen, LBS_SCAN_RSSI_TO_MBM(rssi), GFP_KERNEL); - cfg80211_put_bss(bss); + cfg80211_put_bss(wiphy, bss); } } else lbs_deb_scan("scan response: missing BSS channel IE\n"); @@ -1444,7 +1444,7 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, done: if (bss) - cfg80211_put_bss(bss); + cfg80211_put_bss(wiphy, bss); lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } @@ -1766,7 +1766,7 @@ static void lbs_join_post(struct lbs_private *priv, params->beacon_interval, fake_ie, fake - fake_ie, 0, GFP_KERNEL); - cfg80211_put_bss(bss); + cfg80211_put_bss(priv->wdev->wiphy, bss); memcpy(priv->wdev->ssid, params->ssid, params->ssid_len); priv->wdev->ssid_len = params->ssid_len; @@ -2011,7 +2011,7 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev, if (bss) { ret = lbs_ibss_join_existing(priv, params, bss); - cfg80211_put_bss(bss); + cfg80211_put_bss(wiphy, bss); } else ret = lbs_ibss_start_new(priv, params); @@ -2081,10 +2081,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev) lbs_deb_enter(LBS_DEB_CFG80211); wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); - if (!wdev) { - dev_err(dev, "cannot allocate wireless device\n"); + if (!wdev) return ERR_PTR(-ENOMEM); - } wdev->wiphy = wiphy_new(&lbs_cfg80211_ops, sizeof(struct lbs_private)); if (!wdev->wiphy) { @@ -2132,6 +2130,21 @@ static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv) lbs_deb_leave(LBS_DEB_CFG80211); } +static void lbs_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + + lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain " + "callback for domain %c%c\n", request->alpha2[0], + request->alpha2[1]); + + memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2)); + if (lbs_iface_active(priv)) + lbs_set_11d_domain_info(priv); + + lbs_deb_leave(LBS_DEB_CFG80211); +} /* * This function get's called after lbs_setup_firmware() determined the @@ -2184,24 +2197,6 @@ int lbs_cfg_register(struct lbs_private *priv) return ret; } -int lbs_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) -{ - struct lbs_private *priv = wiphy_priv(wiphy); - int ret = 0; - - lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain " - "callback for domain %c%c\n", request->alpha2[0], - request->alpha2[1]); - - memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2)); - if (lbs_iface_active(priv)) - ret = lbs_set_11d_domain_info(priv); - - lbs_deb_leave(LBS_DEB_CFG80211); - return ret; -} - void lbs_scan_deinit(struct lbs_private *priv) { lbs_deb_enter(LBS_DEB_CFG80211); diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h index 558168ce634d..10995f59fe34 100644 --- a/drivers/net/wireless/libertas/cfg.h +++ b/drivers/net/wireless/libertas/cfg.h @@ -10,9 +10,6 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev); int lbs_cfg_register(struct lbs_private *priv); void lbs_cfg_free(struct lbs_private *priv); -int lbs_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request); - void lbs_send_disconnect_notification(struct lbs_private *priv); void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); diff --git a/drivers/net/wireless/mac80211_hwsim.c 
b/drivers/net/wireless/mac80211_hwsim.c index ff9085502bea..cffdf4fbf161 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -48,6 +48,10 @@ static int channels = 1; module_param(channels, int, 0444); MODULE_PARM_DESC(channels, "Number of concurrent channels"); +static bool paged_rx = false; +module_param(paged_rx, bool, 0644); +MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones"); + /** * enum hwsim_regtest - the type of regulatory tests we offer * @@ -333,11 +337,11 @@ struct mac80211_hwsim_data { int scan_chan_idx; struct ieee80211_channel *channel; - unsigned long beacon_int; /* in jiffies unit */ + u64 beacon_int /* beacon interval in us */; unsigned int rx_filter; bool started, idle, scanning; struct mutex mutex; - struct timer_list beacon_timer; + struct tasklet_hrtimer beacon_timer; enum ps_mode { PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL } ps; @@ -357,7 +361,10 @@ struct mac80211_hwsim_data { int power_level; /* difference between this hw's clock and the real clock, in usecs */ - u64 tsf_offset; + s64 tsf_offset; + s64 bcn_delta; + /* absolute beacon transmission time. Used to cover up "tx" delay. */ + u64 abs_bcn_ts; }; @@ -405,15 +412,19 @@ static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static inline u64 mac80211_hwsim_get_tsf_raw(void) +{ + return ktime_to_us(ktime_get_real()); +} + static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data) { - struct timeval tv = ktime_to_timeval(ktime_get_real()); - u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec; + u64 now = mac80211_hwsim_get_tsf_raw(); return cpu_to_le64(now + data->tsf_offset); } static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = hw->priv; return le64_to_cpu(__mac80211_hwsim_get_tsf(data)); @@ -423,9 +434,13 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf) { struct mac80211_hwsim_data *data = hw->priv; - struct timeval tv = ktime_to_timeval(ktime_get_real()); - u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec; - data->tsf_offset = tsf - now; + u64 now = mac80211_hwsim_get_tsf(hw, vif); + u32 bcn_int = data->beacon_int; + s64 delta = tsf - now; + + data->tsf_offset += delta; + /* adjust after beaconing with new timestamp at old TBTT */ + data->bcn_delta = do_div(delta, bcn_int); } static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, @@ -696,7 +711,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_rx_status rx_status; - struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); + u64 now; memset(&rx_status, 0, sizeof(rx_status)); rx_status.flag |= RX_FLAG_MACTIME_START; @@ -722,11 +737,23 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, secpath_reset(skb); nf_reset(skb); + /* + * Get absolute mactime here so all HWs RX at the "same time", and + * absolute TX time for beacon mactime so the timestamp matches. + * Giving beacons a different mactime than non-beacons looks messy, but + * it helps the Toffset be exact and a ~10us mactime discrepancy + * probably doesn't really matter. 
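The set_tsf() change above splits the requested TSF adjustment into a persistent tsf_offset and a one-shot bcn_delta by taking the adjustment modulo the beacon interval with do_div(); the same remainder arithmetic is used later to program the first beacon at the next TBTT. A standalone plain-C sketch of that arithmetic (the kernel's do_div(x, y) divides x in place and returns the remainder, which is what % gives here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bcn_int = 100 * 1024;        /* 100 TU beacon interval, in us */
        uint64_t tsf = 123456789;             /* current mactime, in us */
        uint64_t since_tbtt = tsf % bcn_int;  /* time since the last TBTT */
        uint64_t until_tbtt = bcn_int - since_tbtt;

        printf("%llu us since TBTT, next TBTT in %llu us\n",
               (unsigned long long)since_tbtt,
               (unsigned long long)until_tbtt);
        return 0;
}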
+ */ + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + now = data->abs_bcn_ts; + else + now = mac80211_hwsim_get_tsf_raw(); + /* Copy skb to all enabled radios that are on the current frequency */ spin_lock(&hwsim_radio_lock); list_for_each_entry(data2, &hwsim_radios, list) { struct sk_buff *nskb; - struct ieee80211_mgmt *mgmt; struct tx_iter_data tx_iter_data = { .receive = false, .channel = chan, @@ -755,24 +782,30 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, * reserve some space for our vendor and the normal * radiotap header, since we're copying anyway */ - nskb = skb_copy_expand(skb, 64, 0, GFP_ATOMIC); - if (nskb == NULL) - continue; + if (skb->len < PAGE_SIZE && paged_rx) { + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) + continue; + + nskb = dev_alloc_skb(128); + if (!nskb) { + __free_page(page); + continue; + } + + memcpy(page_address(page), skb->data, skb->len); + skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len); + } else { + nskb = skb_copy(skb, GFP_ATOMIC); + if (!nskb) + continue; + } if (mac80211_hwsim_addr_match(data2, hdr->addr1)) ack = true; - /* set bcn timestamp relative to receiver mactime */ - rx_status.mactime = - le64_to_cpu(__mac80211_hwsim_get_tsf(data2)); - mgmt = (struct ieee80211_mgmt *) nskb->data; - if (ieee80211_is_beacon(mgmt->frame_control) || - ieee80211_is_probe_resp(mgmt->frame_control)) - mgmt->u.beacon.timestamp = cpu_to_le64( - rx_status.mactime + - (data->tsf_offset - data2->tsf_offset) + - 24 * 8 * 10 / txrate->bitrate); - + rx_status.mactime = now + data2->tsf_offset; #if 0 /* * Don't enable this code by default as the OUI 00:00:00 @@ -896,7 +929,7 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; data->started = false; - del_timer(&data->beacon_timer); + tasklet_hrtimer_cancel(&data->beacon_timer); wiphy_debug(hw->wiphy, "%s\n", __func__); } @@ -962,7 +995,11 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_vif *vif) { - struct ieee80211_hw *hw = arg; + struct mac80211_hwsim_data *data = arg; + struct ieee80211_hw *hw = data->hw; + struct ieee80211_tx_info *info; + struct ieee80211_rate *txrate; + struct ieee80211_mgmt *mgmt; struct sk_buff *skb; hwsim_check_magic(vif); @@ -975,26 +1012,48 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, skb = ieee80211_beacon_get(hw, vif); if (skb == NULL) return; + info = IEEE80211_SKB_CB(skb); + txrate = ieee80211_get_tx_rate(hw, info); + + mgmt = (struct ieee80211_mgmt *) skb->data; + /* fake header transmission time */ + data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw(); + mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts + + data->tsf_offset + + 24 * 8 * 10 / txrate->bitrate); mac80211_hwsim_tx_frame(hw, skb, rcu_dereference(vif->chanctx_conf)->def.chan); } - -static void mac80211_hwsim_beacon(unsigned long arg) +static enum hrtimer_restart +mac80211_hwsim_beacon(struct hrtimer *timer) { - struct ieee80211_hw *hw = (struct ieee80211_hw *) arg; - struct mac80211_hwsim_data *data = hw->priv; + struct mac80211_hwsim_data *data = + container_of(timer, struct mac80211_hwsim_data, + beacon_timer.timer); + struct ieee80211_hw *hw = data->hw; + u64 bcn_int = data->beacon_int; + ktime_t next_bcn; if (!data->started) - return; + goto out; ieee80211_iterate_active_interfaces_atomic( hw, IEEE80211_IFACE_ITER_NORMAL, - mac80211_hwsim_beacon_tx, hw); + 
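The beacon path above fakes the on-air timestamp by adding the header transmission time to the TSF; since the rate table keeps bitrate in 100 kb/s units, 24 * 8 * 10 / bitrate is the airtime of the 24-byte 802.11 header in microseconds. A standalone sketch of that term:

#include <stdio.h>

static unsigned int hdr_airtime_us(unsigned int bitrate_100kbps)
{
        /* 24 bytes * 8 bits, bitrate given in units of 100 kb/s */
        return 24 * 8 * 10 / bitrate_100kbps;
}

int main(void)
{
        printf("1 Mb/s : %u us\n", hdr_airtime_us(10));   /* 192 us */
        printf("6 Mb/s : %u us\n", hdr_airtime_us(60));   /* 32 us */
        return 0;
}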
mac80211_hwsim_beacon_tx, data); + + /* beacon at new TBTT + beacon interval */ + if (data->bcn_delta) { + bcn_int -= data->bcn_delta; + data->bcn_delta = 0; + } - data->beacon_timer.expires = jiffies + data->beacon_int; - add_timer(&data->beacon_timer); + next_bcn = ktime_add(hrtimer_get_expires(timer), + ns_to_ktime(bcn_int * 1000)); + tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS); +out: + return HRTIMER_NORESTART; } static const char *hwsim_chantypes[] = { @@ -1032,9 +1091,16 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) data->power_level = conf->power_level; if (!data->started || !data->beacon_int) - del_timer(&data->beacon_timer); - else - mod_timer(&data->beacon_timer, jiffies + data->beacon_int); + tasklet_hrtimer_cancel(&data->beacon_timer); + else if (!hrtimer_is_queued(&data->beacon_timer.timer)) { + u64 tsf = mac80211_hwsim_get_tsf(hw, NULL); + u32 bcn_int = data->beacon_int; + u64 until_tbtt = bcn_int - do_div(tsf, bcn_int); + + tasklet_hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * 1000), + HRTIMER_MODE_REL); + } return 0; } @@ -1084,12 +1150,26 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON_INT) { wiphy_debug(hw->wiphy, " BCNINT: %d\n", info->beacon_int); - data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; - if (WARN_ON(!data->beacon_int)) - data->beacon_int = 1; - if (data->started) - mod_timer(&data->beacon_timer, - jiffies + data->beacon_int); + data->beacon_int = info->beacon_int * 1024; + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + wiphy_debug(hw->wiphy, " BCN EN: %d\n", info->enable_beacon); + if (data->started && + !hrtimer_is_queued(&data->beacon_timer.timer) && + info->enable_beacon) { + u64 tsf, until_tbtt; + u32 bcn_int; + if (WARN_ON(!data->beacon_int)) + data->beacon_int = 1000 * 1024; + tsf = mac80211_hwsim_get_tsf(hw, vif); + bcn_int = data->beacon_int; + until_tbtt = bcn_int - do_div(tsf, bcn_int); + tasklet_hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * 1000), + HRTIMER_MODE_REL); + } else if (!info->enable_beacon) + tasklet_hrtimer_cancel(&data->beacon_timer); } if (changed & BSS_CHANGED_ERP_CTS_PROT) { @@ -1292,7 +1372,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_START: ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -2165,6 +2247,7 @@ static int __init init_mac80211_hwsim(void) /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); hw->sta_data_size = sizeof(struct hwsim_sta_priv); + hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv); memcpy(data->channels_2ghz, hwsim_channels_2ghz, sizeof(hwsim_channels_2ghz)); @@ -2370,8 +2453,9 @@ static int __init init_mac80211_hwsim(void) data->debugfs, data, &hwsim_fops_group); - setup_timer(&data->beacon_timer, mac80211_hwsim_beacon, - (unsigned long) hw); + tasklet_hrtimer_init(&data->beacon_timer, + mac80211_hwsim_beacon, + CLOCK_REALTIME, HRTIMER_MODE_ABS); list_add_tail(&data->list, &hwsim_radios); } diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c new file mode 100644 index 000000000000..cf43b3c29250 --- /dev/null +++ b/drivers/net/wireless/mwifiex/11ac.c @@ 
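The beacon interval above moves from a jiffies count to microseconds (TU * 1024), which removes the truncation the old integer formula suffered. A standalone comparison, assuming HZ = 100 purely for the illustration:

#include <stdio.h>

int main(void)
{
        unsigned int hz = 100, beacon_int_tu = 100;
        unsigned long old_jiffies = 1024 * beacon_int_tu / 1000 * hz / 1000;
        unsigned long new_us = beacon_int_tu * 1024;

        /* old formula: 10 jiffies = 100000 us instead of the intended 102400 us */
        printf("old: %lu jiffies (= %lu us)\n", old_jiffies,
               old_jiffies * (1000000 / hz));
        printf("new: %lu us\n", new_us);   /* 102400 us, exact */
        return 0;
}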
-0,0 +1,261 @@ +/* + * Marvell Wireless LAN device driver: 802.11ac + * + * Copyright (C) 2013, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#include "decl.h" +#include "ioctl.h" +#include "fw.h" +#include "main.h" +#include "11ac.h" + +/* This function converts the 2-bit MCS map to the highest long GI + * VHT data rate. + */ +static u16 +mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv, + u8 bands, u16 mcs_map) +{ + u8 i, nss, max_mcs; + u16 max_rate = 0; + u32 usr_vht_cap_info = 0; + struct mwifiex_adapter *adapter = priv->adapter; + /* tables of the MCS map to the highest data rate (in Mbps) + * supported for long GI + */ + u16 max_rate_lgi_80MHZ[8][3] = { + {0x124, 0x15F, 0x186}, /* NSS = 1 */ + {0x249, 0x2BE, 0x30C}, /* NSS = 2 */ + {0x36D, 0x41D, 0x492}, /* NSS = 3 */ + {0x492, 0x57C, 0x618}, /* NSS = 4 */ + {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */ + {0x6DB, 0x83A, 0x0}, /* NSS = 6 */ + {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */ + {0x924, 0xAF8, 0xC30} /* NSS = 8 */ + }; + u16 max_rate_lgi_160MHZ[8][3] = { + {0x249, 0x2BE, 0x30C}, /* NSS = 1 */ + {0x492, 0x57C, 0x618}, /* NSS = 2 */ + {0x6DB, 0x83A, 0x0}, /* NSS = 3 */ + {0x924, 0xAF8, 0xC30}, /* NSS = 4 */ + {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */ + {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */ + {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */ + {0x1248, 0x15F0, 0x1860} /* NSS = 8 */ + }; + + if (bands & BAND_AAC) + usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a; + else + usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg; + + /* find the max NSS supported */ + nss = 0; + for (i = 0; i < 8; i++) { + max_mcs = (mcs_map >> (2 * i)) & 0x3; + if (max_mcs < 3) + nss = i; + } + max_mcs = (mcs_map >> (2 * nss)) & 0x3; + + /* if max_mcs is 3, nss must be 0 (SS = 1). 
Thus, max mcs is MCS 9 */ + if (max_mcs >= 3) + max_mcs = 2; + + if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) { + /* support 160 MHz */ + max_rate = max_rate_lgi_160MHZ[nss][max_mcs]; + if (!max_rate) + /* MCS9 is not supported in NSS6 */ + max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1]; + } else { + max_rate = max_rate_lgi_80MHZ[nss][max_mcs]; + if (!max_rate) + /* MCS9 is not supported in NSS3 */ + max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1]; + } + + return max_rate; +} + +static void +mwifiex_fill_vht_cap_info(struct mwifiex_private *priv, + struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands) +{ + struct mwifiex_adapter *adapter = priv->adapter; + + if (bands & BAND_A) + vht_cap->vht_cap.vht_cap_info = + cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a); + else + vht_cap->vht_cap.vht_cap_info = + cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg); +} + +static void +mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv, + struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands) +{ + struct mwifiex_adapter *adapter = priv->adapter; + u16 mcs_map_user, mcs_map_resp, mcs_map_result; + u16 mcs_user, mcs_resp, nss, tmp; + + /* Fill VHT cap info */ + mwifiex_fill_vht_cap_info(priv, vht_cap, bands); + + /* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */ + mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support); + mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map); + mcs_map_result = 0; + + for (nss = 1; nss <= 8; nss++) { + mcs_user = GET_VHTNSSMCS(mcs_map_user, nss); + mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss); + + if ((mcs_user == NO_NSS_SUPPORT) || + (mcs_resp == NO_NSS_SUPPORT)) + SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT); + else + SET_VHTNSSMCS(mcs_map_result, nss, + min(mcs_user, mcs_resp)); + } + + vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result); + + tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result); + vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp); + + /* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */ + mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support); + mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map); + mcs_map_result = 0; + + for (nss = 1; nss <= 8; nss++) { + mcs_user = GET_VHTNSSMCS(mcs_map_user, nss); + mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss); + if ((mcs_user == NO_NSS_SUPPORT) || + (mcs_resp == NO_NSS_SUPPORT)) + SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT); + else + SET_VHTNSSMCS(mcs_map_result, nss, + min(mcs_user, mcs_resp)); + } + + vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result); + + tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result); + vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp); + + return; +} + +int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, + struct mwifiex_bssdescriptor *bss_desc, + u8 **buffer) +{ + struct mwifiex_ie_types_vhtcap *vht_cap; + struct mwifiex_ie_types_oper_mode_ntf *oper_ntf; + struct ieee_types_oper_mode_ntf *ieee_oper_ntf; + struct mwifiex_ie_types_vht_oper *vht_op; + struct mwifiex_adapter *adapter = priv->adapter; + u8 supp_chwd_set; + u32 usr_vht_cap_info; + int ret_len = 0; + + if (bss_desc->bss_band & BAND_A) + usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a; + else + usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg; + + /* VHT Capabilities IE */ + if (bss_desc->bcn_vht_cap) { + vht_cap = (struct mwifiex_ie_types_vhtcap *)*buffer; + memset(vht_cap, 0, sizeof(*vht_cap)); + vht_cap->header.type = cpu_to_le16(WLAN_EID_VHT_CAPABILITY); + 
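mwifiex_fill_vht_cap_tlv() above intersects the local and peer VHT MCS maps two bits at a time (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = NSS not supported). A standalone plain-C sketch of that per-NSS minimum, using made-up example maps:

#include <stdint.h>
#include <stdio.h>

#define NO_NSS_SUPPORT 0x3

static unsigned int get_nss_mcs(uint16_t map, unsigned int nss)
{
        return (map >> (2 * (nss - 1))) & 0x3;
}

static void set_nss_mcs(uint16_t *map, unsigned int nss, unsigned int val)
{
        *map |= (val & 0x3) << (2 * (nss - 1));
}

int main(void)
{
        uint16_t user = 0xfffe;   /* NSS1: MCS 0-9, NSS2-8 unsupported */
        uint16_t ap   = 0xfffc;   /* NSS1: MCS 0-7, NSS2-8 unsupported */
        uint16_t out  = 0;
        unsigned int nss;

        for (nss = 1; nss <= 8; nss++) {
                unsigned int u = get_nss_mcs(user, nss);
                unsigned int a = get_nss_mcs(ap, nss);

                if (u == NO_NSS_SUPPORT || a == NO_NSS_SUPPORT)
                        set_nss_mcs(&out, nss, NO_NSS_SUPPORT);
                else
                        set_nss_mcs(&out, nss, u < a ? u : a);
        }
        printf("negotiated rx MCS map: 0x%04x\n", (unsigned int)out);  /* 0xfffc */
        return 0;
}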
vht_cap->header.len = + cpu_to_le16(sizeof(struct ieee80211_vht_cap)); + memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), + (u8 *)bss_desc->bcn_vht_cap + + sizeof(struct ieee_types_header), + le16_to_cpu(vht_cap->header.len)); + + mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); + *buffer += sizeof(*vht_cap); + ret_len += sizeof(*vht_cap); + } + + /* VHT Operation IE */ + if (bss_desc->bcn_vht_oper) { + if (priv->bss_mode == HostCmd_BSS_MODE_IBSS) { + vht_op = (struct mwifiex_ie_types_vht_oper *)*buffer; + memset(vht_op, 0, sizeof(*vht_op)); + vht_op->header.type = + cpu_to_le16(WLAN_EID_VHT_OPERATION); + vht_op->header.len = cpu_to_le16(sizeof(*vht_op) - + sizeof(struct mwifiex_ie_types_header)); + memcpy((u8 *)vht_op + + sizeof(struct mwifiex_ie_types_header), + (u8 *)bss_desc->bcn_vht_oper + + sizeof(struct ieee_types_header), + le16_to_cpu(vht_op->header.len)); + + /* negotiate the channel width and central freq + * and keep the central freq as the peer suggests + */ + supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info); + + switch (supp_chwd_set) { + case 0: + vht_op->chan_width = + min_t(u8, IEEE80211_VHT_CHANWIDTH_80MHZ, + bss_desc->bcn_vht_oper->chan_width); + break; + case 1: + vht_op->chan_width = + min_t(u8, IEEE80211_VHT_CHANWIDTH_160MHZ, + bss_desc->bcn_vht_oper->chan_width); + break; + case 2: + vht_op->chan_width = + min_t(u8, IEEE80211_VHT_CHANWIDTH_80P80MHZ, + bss_desc->bcn_vht_oper->chan_width); + break; + default: + vht_op->chan_width = + IEEE80211_VHT_CHANWIDTH_USE_HT; + break; + } + + *buffer += sizeof(*vht_op); + ret_len += sizeof(*vht_op); + } + } + + /* Operating Mode Notification IE */ + if (bss_desc->oper_mode) { + ieee_oper_ntf = bss_desc->oper_mode; + oper_ntf = (void *)*buffer; + memset(oper_ntf, 0, sizeof(*oper_ntf)); + oper_ntf->header.type = cpu_to_le16(WLAN_EID_OPMODE_NOTIF); + oper_ntf->header.len = cpu_to_le16(sizeof(u8)); + oper_ntf->oper_mode = ieee_oper_ntf->oper_mode; + *buffer += sizeof(*oper_ntf); + ret_len += sizeof(*oper_ntf); + } + + return ret_len; +} diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h new file mode 100644 index 000000000000..80fd1ba46200 --- /dev/null +++ b/drivers/net/wireless/mwifiex/11ac.h @@ -0,0 +1,26 @@ +/* + * Marvell Wireless LAN device driver: 802.11ac + * + * Copyright (C) 2013, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. 
+ */ + +#ifndef _MWIFIEX_11AC_H_ +#define _MWIFIEX_11AC_H_ + +int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, + struct mwifiex_bssdescriptor *bss_desc, + u8 **buffer); +#endif /* _MWIFIEX_11AC_H_ */ diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 245a371f1a43..45f19716687e 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -53,7 +53,9 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type, sizeof(sband->ht_cap.mcs)); if (priv->bss_mode == NL80211_IFTYPE_STATION || - sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && + (priv->adapter->sec_chan_offset != + IEEE80211_HT_PARAM_CHA_SEC_NONE))) /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */ SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask); @@ -248,7 +250,8 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd, * - Setting HT Tx capability and HT Tx information fields * - Ensuring correct endian-ness */ -int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action, +int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, u16 cmd_action, struct mwifiex_ds_11n_tx_cfg *txcfg) { struct host_cmd_ds_11n_cfg *htcfg = &cmd->params.htcfg; @@ -258,6 +261,10 @@ int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action, htcfg->action = cpu_to_le16(cmd_action); htcfg->ht_tx_cap = cpu_to_le16(txcfg->tx_htcap); htcfg->ht_tx_info = cpu_to_le16(txcfg->tx_htinfo); + + if (priv->adapter->is_hw_11ac_capable) + htcfg->misc_config = cpu_to_le16(txcfg->misc_config); + return 0; } @@ -398,45 +405,6 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, } /* - * This function reconfigures the Tx buffer size in firmware. - * - * This function prepares a firmware command and issues it, if - * the current Tx buffer size is different from the one requested. - * Maximum configurable Tx buffer size is limited by the HT capability - * field value. - */ -void -mwifiex_cfg_tx_buf(struct mwifiex_private *priv, - struct mwifiex_bssdescriptor *bss_desc) -{ - u16 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_2K; - u16 tx_buf, curr_tx_buf_size = 0; - - if (bss_desc->bcn_ht_cap) { - if (le16_to_cpu(bss_desc->bcn_ht_cap->cap_info) & - IEEE80211_HT_CAP_MAX_AMSDU) - max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_8K; - else - max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_4K; - } - - tx_buf = min(priv->adapter->max_tx_buf_size, max_amsdu); - - dev_dbg(priv->adapter->dev, "info: max_amsdu=%d, max_tx_buf=%d\n", - max_amsdu, priv->adapter->max_tx_buf_size); - - if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_2K) - curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; - else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_4K) - curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; - else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_8K) - curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_8K; - if (curr_tx_buf_size != tx_buf) - mwifiex_send_cmd_async(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF, - HostCmd_ACT_GEN_SET, 0, &tx_buf); -} - -/* * This function checks if the given pointer is valid entry of * Tx BA Stream table. 
*/ @@ -531,11 +499,8 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid, if (!mwifiex_get_ba_tbl(priv, tid, ra)) { new_node = kzalloc(sizeof(struct mwifiex_tx_ba_stream_tbl), GFP_ATOMIC); - if (!new_node) { - dev_err(priv->adapter->dev, - "%s: failed to alloc new_node\n", __func__); + if (!new_node) return; - } INIT_LIST_HEAD(&new_node->list); diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h index 46006a54a656..375db01442bf 100644 --- a/drivers/net/wireless/mwifiex/11n.h +++ b/drivers/net/wireless/mwifiex/11n.h @@ -28,14 +28,12 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); -int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action, +int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, u16 cmd_action, struct mwifiex_ds_11n_tx_cfg *txcfg); - int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc, u8 **buffer); -void mwifiex_cfg_tx_buf(struct mwifiex_private *priv, - struct mwifiex_bssdescriptor *bss_desc); void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type, struct mwifiex_ie_types_htcap *); int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv, diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 68d52cfc1ebd..af8fe6352eed 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -278,14 +278,16 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); break; case -1: - adapter->data_sent = false; + if (adapter->iface_type != MWIFIEX_PCIE) + adapter->data_sent = false; dev_err(adapter->dev, "%s: host_to_card failed: %#x\n", __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb_aggr, 1, ret); return 0; case -EINPROGRESS: - adapter->data_sent = false; + if (adapter->iface_type != MWIFIEX_PCIE) + adapter->data_sent = false; break; case 0: mwifiex_write_data_complete(adapter, skb_aggr, 1, ret); diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index 4a97acd170f7..5e796f847088 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -272,11 +272,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, } /* if !tbl then create one */ new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); - if (!new_node) { - dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n", - __func__); + if (!new_node) return; - } INIT_LIST_HEAD(&new_node->list); new_node->tid = tid; diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index b2e27723f801..4f614aad9ded 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -20,12 +20,12 @@ config MWIFIEX_SDIO mwifiex_sdio. config MWIFIEX_PCIE - tristate "Marvell WiFi-Ex Driver for PCIE 8766" + tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897" depends on MWIFIEX && PCI select FW_LOADER ---help--- This adds support for wireless adapters based on Marvell - 8766 chipset with PCIe interface. + 8766/8897 chipsets with PCIe interface. If you choose to build it as a module, it will be called mwifiex_pcie. 
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile index dd0410d2d465..97b245cbafd8 100644 --- a/drivers/net/wireless/mwifiex/Makefile +++ b/drivers/net/wireless/mwifiex/Makefile @@ -23,6 +23,7 @@ mwifiex-y += util.o mwifiex-y += txrx.o mwifiex-y += wmm.o mwifiex-y += 11n.o +mwifiex-y += 11ac.o mwifiex-y += 11n_aggr.o mwifiex-y += 11n_rxreorder.o mwifiex-y += scan.o diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README index b55badef4660..3d64613ebb29 100644 --- a/drivers/net/wireless/mwifiex/README +++ b/drivers/net/wireless/mwifiex/README @@ -121,7 +121,6 @@ info wmm_ac_vi = <number of packets sent to device from WMM AcVi queue> wmm_ac_be = <number of packets sent to device from WMM AcBE queue> wmm_ac_bk = <number of packets sent to device from WMM AcBK queue> - max_tx_buf_size = <maximum Tx buffer size> tx_buf_size = <current Tx buffer size> curr_tx_buf_size = <current Tx buffer size> ps_mode = <0/1, CAM mode/PS mode> diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index cdb11b3964e2..a44023a7bd57 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -519,8 +519,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) * - Set by user * - Set bt Country IE */ -static int mwifiex_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static void mwifiex_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) { struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); @@ -540,8 +540,6 @@ static int mwifiex_reg_notifier(struct wiphy *wiphy, break; } mwifiex_send_domain_info_cmd_fw(wiphy); - - return 0; } /* @@ -836,6 +834,66 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return ret; } +static void +mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo, + struct rate_info *rate) +{ + struct mwifiex_adapter *adapter = priv->adapter; + + if (adapter->is_hw_11ac_capable) { + /* bit[1-0]: 00=LG 01=HT 10=VHT */ + if (tx_htinfo & BIT(0)) { + /* HT */ + rate->mcs = priv->tx_rate; + rate->flags |= RATE_INFO_FLAGS_MCS; + } + if (tx_htinfo & BIT(1)) { + /* VHT */ + rate->mcs = priv->tx_rate & 0x0F; + rate->flags |= RATE_INFO_FLAGS_VHT_MCS; + } + + if (tx_htinfo & (BIT(1) | BIT(0))) { + /* HT or VHT */ + switch (tx_htinfo & (BIT(3) | BIT(2))) { + case 0: + /* This will be 20MHz */ + break; + case (BIT(2)): + rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; + break; + case (BIT(3)): + rate->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH; + break; + case (BIT(3) | BIT(2)): + rate->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH; + break; + } + + if (tx_htinfo & BIT(4)) + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; + + if ((priv->tx_rate >> 4) == 1) + rate->nss = 2; + else + rate->nss = 1; + } + } else { + /* + * Bit 0 in tx_htinfo indicates that current Tx rate + * is 11n rate. Valid MCS index values for us are 0 to 15. + */ + if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) { + rate->mcs = priv->tx_rate; + rate->flags |= RATE_INFO_FLAGS_MCS; + if (tx_htinfo & BIT(1)) + rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; + if (tx_htinfo & BIT(2)) + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; + } + } +} + /* * This function dumps the station information on a buffer. * @@ -875,20 +933,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, HostCmd_ACT_GEN_GET, DTIM_PERIOD_I, &priv->dtim_period); - /* - * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. 
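mwifiex_parse_htinfo() above decodes the firmware's tx_htinfo byte for 11ac hardware: bits [1:0] carry the rate format, bits [3:2] the bandwidth, and bit 4 the guard interval. The driver tests the two format bits individually, but reading them as a 2-bit value gives the same mapping; a standalone decoding sketch with an arbitrary example value:

#include <stdio.h>

int main(void)
{
        unsigned char tx_htinfo = 0x1a;   /* example: VHT, 80 MHz, short GI */
        static const char * const fmt[] = { "legacy", "HT", "VHT", "?" };
        static const int bw[] = { 20, 40, 80, 160 };

        printf("format: %s\n", fmt[tx_htinfo & 0x3]);               /* VHT */
        printf("width : %d MHz\n", bw[(tx_htinfo >> 2) & 0x3]);     /* 80 MHz */
        printf("SGI   : %s\n", (tx_htinfo & 0x10) ? "yes" : "no");  /* yes */
        return 0;
}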
Valid - * MCS index values for us are 0 to 15. - */ - if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) { - sinfo->txrate.mcs = priv->tx_rate; - sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; - /* 40MHz rate */ - if (priv->tx_htinfo & BIT(1)) - sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; - /* SGI enabled */ - if (priv->tx_htinfo & BIT(2)) - sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; - } + mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate); sinfo->signal_avg = priv->bcn_rssi_avg; sinfo->rx_bytes = priv->stats.rx_bytes; @@ -1297,20 +1342,22 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, /* Set appropriate bands */ if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) { bss_cfg->band_cfg = BAND_CONFIG_BG; + config_bands = BAND_B | BAND_G; - if (cfg80211_get_chandef_type(¶ms->chandef) == - NL80211_CHAN_NO_HT) - config_bands = BAND_B | BAND_G; - else - config_bands = BAND_B | BAND_G | BAND_GN; + if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT) + config_bands |= BAND_GN; + + if (params->chandef.width > NL80211_CHAN_WIDTH_40) + config_bands |= BAND_GAC; } else { bss_cfg->band_cfg = BAND_CONFIG_A; + config_bands = BAND_A; - if (cfg80211_get_chandef_type(¶ms->chandef) == - NL80211_CHAN_NO_HT) - config_bands = BAND_A; - else - config_bands = BAND_AN | BAND_A; + if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT) + config_bands |= BAND_AN; + + if (params->chandef.width > NL80211_CHAN_WIDTH_40) + config_bands |= BAND_AAC; } if (!((config_bands | priv->adapter->fw_bands) & @@ -1327,6 +1374,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, } mwifiex_set_ht_params(priv, bss_cfg, params); + mwifiex_set_wmm_params(priv, bss_cfg, params); if (params->inactivity_timeout > 0) { /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */ @@ -1431,7 +1479,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) bss = cfg80211_inform_bss(priv->wdev->wiphy, chan, bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, 0, ie_buf, ie_len, 0, GFP_KERNEL); - cfg80211_put_bss(bss); + cfg80211_put_bss(priv->wdev->wiphy, bss); memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN); return 0; @@ -1821,10 +1869,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL); - if (!priv->user_scan_cfg) { - dev_err(priv->adapter->dev, "failed to alloc scan_req\n"); + if (!priv->user_scan_cfg) return -ENOMEM; - } priv->scan_request = request; @@ -1882,6 +1928,79 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return 0; } +static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info, + struct mwifiex_private *priv) +{ + struct mwifiex_adapter *adapter = priv->adapter; + u32 vht_cap = 0, cap = adapter->hw_dot_11ac_dev_cap; + + vht_info->vht_supported = true; + + switch (GET_VHTCAP_MAXMPDULEN(cap)) { + case 0x00: + vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; + break; + case 0x01: + vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; + break; + case 0x10: + vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + break; + default: + dev_err(adapter->dev, "unsupported MAX MPDU len\n"); + break; + } + + if (ISSUPP_11ACVHTHTCVHT(cap)) + vht_cap |= IEEE80211_VHT_CAP_HTC_VHT; + + if (ISSUPP_11ACVHTTXOPPS(cap)) + vht_cap |= IEEE80211_VHT_CAP_VHT_TXOP_PS; + + if (ISSUPP_11ACMURXBEAMFORMEE(cap)) + vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + if (ISSUPP_11ACMUTXBEAMFORMEE(cap)) + vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + if (ISSUPP_11ACSUBEAMFORMER(cap)) + vht_cap |= 
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + + if (ISSUPP_11ACSUBEAMFORMEE(cap)) + vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + + if (ISSUPP_11ACRXSTBC(cap)) + vht_cap |= IEEE80211_VHT_CAP_RXSTBC_1; + + if (ISSUPP_11ACTXSTBC(cap)) + vht_cap |= IEEE80211_VHT_CAP_TXSTBC; + + if (ISSUPP_11ACSGI160(cap)) + vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_160; + + if (ISSUPP_11ACSGI80(cap)) + vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_80; + + if (ISSUPP_11ACLDPC(cap)) + vht_cap |= IEEE80211_VHT_CAP_RXLDPC; + + if (ISSUPP_11ACBW8080(cap)) + vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; + + if (ISSUPP_11ACBW160(cap)) + vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + + vht_info->cap = vht_cap; + + /* Update MCS support for VHT */ + vht_info->vht_mcs.rx_mcs_map = cpu_to_le16( + adapter->hw_dot_11ac_mcs_support & 0xFFFF); + vht_info->vht_mcs.rx_highest = 0; + vht_info->vht_mcs.tx_mcs_map = cpu_to_le16( + adapter->hw_dot_11ac_mcs_support >> 16); + vht_info->vht_mcs.tx_highest = 0; +} + /* * This function sets up the CFG802.11 specific HT capability fields * with default values. @@ -2095,16 +2214,22 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->netdev = dev; mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv); + if (adapter->is_hw_11ac_capable) + mwifiex_setup_vht_caps( + &wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv); if (adapter->config_bands & BAND_A) mwifiex_setup_ht_caps( &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv); + if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable) + mwifiex_setup_vht_caps( + &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv); + dev_net_set(dev, wiphy_net(wiphy)); dev->ieee80211_ptr = priv->wdev; dev->ieee80211_ptr->iftype = priv->bss_mode; memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); - memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN); SET_NETDEV_DEV(dev, wiphy_dev(wiphy)); dev->flags |= IFF_BROADCAST | IFF_MULTICAST; @@ -2248,6 +2373,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | + WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c index f69300f93f42..988552dece75 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ b/drivers/net/wireless/mwifiex/cfp.c @@ -106,8 +106,8 @@ u8 *mwifiex_11d_code_2_region(u8 code) * This function maps an index in supported rates table into * the corresponding data rate. 
*/ -u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, - u8 ht_info) +u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv, + u8 index, u8 ht_info) { /* * For every mcs_rate line, the first 8 bytes are for stream 1x1, @@ -130,10 +130,155 @@ u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90, 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 } }; + /* AC rates */ + u16 ac_mcs_rate_nss1[8][10] = { + /* LG 160M */ + { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D, + 0x492, 0x57C, 0x618 }, + + /* SG 160M */ + { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492, + 0x514, 0x618, 0x6C6 }, + + /* LG 80M */ + { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F, + 0x249, 0x2BE, 0x30C }, + + /* SG 80M */ + { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249, + 0x28A, 0x30C, 0x363 }, + + /* LG 40M */ + { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3, + 0x10E, 0x144, 0x168 }, + + /* SG 40M */ + { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E, + 0x12C, 0x168, 0x190 }, + + /* LG 20M */ + { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 }, + + /* SG 20M */ + { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 }, + }; + /* NSS2 note: the value in the table is 2 multiplier of the actual + * rate + */ + u16 ac_mcs_rate_nss2[8][10] = { + /* LG 160M */ + { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A, + 0x924, 0xAF8, 0xC30 }, + + /* SG 160M */ + { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924, + 0xA28, 0xC30, 0xD8B }, + + /* LG 80M */ + { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D, + 0x492, 0x57C, 0x618 }, + + /* SG 80M */ + { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492, + 0x514, 0x618, 0x6C6 }, + + /* LG 40M */ + { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6, + 0x21C, 0x288, 0x2D0 }, + + /* SG 40M */ + { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C, + 0x258, 0x2D0, 0x320 }, + + /* LG 20M */ + { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104, + 0x138, 0x00 }, + + /* SG 20M */ + { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121, + 0x15B, 0x00 }, + }; + u32 rate = 0; + u8 mcs_index = 0; + u8 bw = 0; + u8 gi = 0; + + if ((ht_info & 0x3) == MWIFIEX_RATE_FORMAT_VHT) { + mcs_index = min(index & 0xF, 9); + + /* 20M: bw=0, 40M: bw=1, 80M: bw=2, 160M: bw=3 */ + bw = (ht_info & 0xC) >> 2; + + /* LGI: gi =0, SGI: gi = 1 */ + gi = (ht_info & 0x10) >> 4; + + if ((index >> 4) == 1) /* NSS = 2 */ + rate = ac_mcs_rate_nss2[2 * (3 - bw) + gi][mcs_index]; + else /* NSS = 1 */ + rate = ac_mcs_rate_nss1[2 * (3 - bw) + gi][mcs_index]; + } else if ((ht_info & 0x3) == MWIFIEX_RATE_FORMAT_HT) { + /* 20M: bw=0, 40M: bw=1 */ + bw = (ht_info & 0xC) >> 2; + + /* LGI: gi =0, SGI: gi = 1 */ + gi = (ht_info & 0x10) >> 4; + + if (index == MWIFIEX_RATE_BITMAP_MCS0) { + if (gi == 1) + rate = 0x0D; /* MCS 32 SGI rate */ + else + rate = 0x0C; /* MCS 32 LGI rate */ + } else if (index < 16) { + if ((bw == 1) || (bw == 0)) + rate = mcs_rate[2 * (1 - bw) + gi][index]; + else + rate = mwifiex_data_rates[0]; + } else { + rate = mwifiex_data_rates[0]; + } + } else { + /* 11n non-HT rates */ + if (index >= MWIFIEX_SUPPORTED_RATES_EXT) + index = 0; + rate = mwifiex_data_rates[index]; + } + + return rate; +} + +/* This function maps an index in supported rates table into + * the corresponding data rate. + */ +u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, + u8 index, u8 ht_info) +{ + /* For every mcs_rate line, the first 8 bytes are for stream 1x1, + * and all 16 bytes are for stream 2x2. 
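The VHT rate lookup above indexes its tables with 2 * (3 - bw) + gi: the rows are ordered 160/80/40/20 MHz, long GI before short GI, so the bandwidth code (0 = 20 MHz .. 3 = 160 MHz) and the guard-interval bit pick the row directly. A standalone sketch of that row selection:

#include <stdio.h>

int main(void)
{
        static const char * const row_name[8] = {
                "LGI 160 MHz", "SGI 160 MHz", "LGI 80 MHz", "SGI 80 MHz",
                "LGI 40 MHz",  "SGI 40 MHz",  "LGI 20 MHz", "SGI 20 MHz",
        };
        unsigned int bw, gi;

        for (bw = 0; bw <= 3; bw++)
                for (gi = 0; gi <= 1; gi++)
                        printf("bw=%u gi=%u -> row %u (%s)\n",
                               bw, gi, 2 * (3 - bw) + gi,
                               row_name[2 * (3 - bw) + gi]);
        return 0;
}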
+ */ + u16 mcs_rate[4][16] = { + /* LGI 40M */ + { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e, + 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c }, + + /* SGI 40M */ + { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c, + 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 }, + + /* LGI 20M */ + { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82, + 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 }, + + /* SGI 20M */ + { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90, + 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 } + }; u32 mcs_num_supp = (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8; u32 rate; + if (priv->adapter->is_hw_11ac_capable) + return mwifiex_index_to_acs_data_rate(priv, index, ht_info); + if (ht_info & BIT(0)) { if (index == MWIFIEX_RATE_BITMAP_MCS0) { if (ht_info & BIT(2)) @@ -269,6 +414,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) { u32 k = 0; struct mwifiex_adapter *adapter = priv->adapter; + if (priv->bss_mode == NL80211_IFTYPE_STATION) { switch (adapter->config_bands) { case BAND_B: @@ -279,6 +425,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) break; case BAND_G: case BAND_G | BAND_GN: + case BAND_G | BAND_GN | BAND_GAC: dev_dbg(adapter->dev, "info: infra band=%d " "supported_rates_g\n", adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_g, @@ -288,7 +435,11 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) case BAND_A | BAND_B | BAND_G: case BAND_A | BAND_B: case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN: + case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC: + case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | + BAND_AAC | BAND_GAC: case BAND_B | BAND_G | BAND_GN: + case BAND_B | BAND_G | BAND_GN | BAND_GAC: dev_dbg(adapter->dev, "info: infra band=%d " "supported_rates_bg\n", adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_bg, @@ -301,14 +452,18 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) k = mwifiex_copy_rates(rates, k, supported_rates_a, sizeof(supported_rates_a)); break; + case BAND_AN: case BAND_A | BAND_AN: + case BAND_A | BAND_AN | BAND_AAC: case BAND_A | BAND_G | BAND_AN | BAND_GN: + case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC: dev_dbg(adapter->dev, "info: infra band=%d " "supported_rates_a\n", adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_a, sizeof(supported_rates_a)); break; case BAND_GN: + case BAND_GN | BAND_GAC: dev_dbg(adapter->dev, "info: infra band=%d " "supported_rates_n\n", adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_n, diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 5f438e6c2155..20a6c5555873 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -24,6 +24,7 @@ #include "main.h" #include "wmm.h" #include "11n.h" +#include "11ac.h" /* * This function initializes a command node. 
@@ -334,20 +335,15 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; - u32 buf_size; u32 i; /* Allocate and initialize struct cmd_ctrl_node */ - buf_size = sizeof(struct cmd_ctrl_node) * MWIFIEX_NUM_OF_CMD_BUFFER; - cmd_array = kzalloc(buf_size, GFP_KERNEL); - if (!cmd_array) { - dev_err(adapter->dev, "%s: failed to alloc cmd_array\n", - __func__); + cmd_array = kcalloc(MWIFIEX_NUM_OF_CMD_BUFFER, + sizeof(struct cmd_ctrl_node), GFP_KERNEL); + if (!cmd_array) return -ENOMEM; - } adapter->cmd_pool = cmd_array; - memset(adapter->cmd_pool, 0, buf_size); /* Allocate and initialize command buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { @@ -1470,6 +1466,24 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number); adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna); + if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) { + adapter->is_hw_11ac_capable = true; + + /* Copy 11AC cap */ + adapter->hw_dot_11ac_dev_cap = + le32_to_cpu(hw_spec->dot_11ac_dev_cap); + adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap; + adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap; + + /* Copy 11AC mcs */ + adapter->hw_dot_11ac_mcs_support = + le32_to_cpu(hw_spec->dot_11ac_mcs_support); + adapter->usr_dot_11ac_mcs_support = + adapter->hw_dot_11ac_mcs_support; + } else { + adapter->is_hw_11ac_capable = false; + } + dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n", adapter->fw_release_number); dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n", diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 46e34aa65d1c..753b5682d53f 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -58,8 +58,6 @@ static struct mwifiex_debug_data items[] = { item_addr(packets_out[WMM_AC_BE]), 1}, {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]), item_addr(packets_out[WMM_AC_BK]), 1}, - {"max_tx_buf_size", item_size(max_tx_buf_size), - item_addr(max_tx_buf_size), 1}, {"tx_buf_size", item_size(tx_buf_size), item_addr(tx_buf_size), 1}, {"curr_tx_buf_size", item_size(curr_tx_buf_size), diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index e9357d87d327..e8a569aaa2e8 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -26,6 +26,7 @@ #include <linux/wait.h> #include <linux/timer.h> #include <linux/ieee80211.h> +#include <net/mac80211.h> #define MWIFIEX_MAX_BSS_NUM (3) @@ -58,6 +59,8 @@ #define MWIFIEX_RTS_MAX_VALUE (2347) #define MWIFIEX_FRAG_MIN_VALUE (256) #define MWIFIEX_FRAG_MAX_VALUE (2346) +#define MWIFIEX_WMM_VERSION 0x01 +#define MWIFIEX_WMM_SUBTYPE 0x01 #define MWIFIEX_RETRY_LIMIT 14 #define MWIFIEX_SDIO_BLOCK_SIZE 256 @@ -126,4 +129,19 @@ enum mwifiex_wmm_ac_e { WMM_AC_VI, WMM_AC_VO } __packed; + +struct ieee_types_wmm_ac_parameters { + u8 aci_aifsn_bitmap; + u8 ecw_bitmap; + __le16 tx_op_limit; +} __packed; + +struct mwifiex_types_wmm_info { + u8 oui[4]; + u8 subtype; + u8 version; + u8 qos_info; + u8 reserved; + struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS]; +} __packed; #endif /* !_MWIFIEX_DECL_H_ */ diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 4dc8e2e9a889..25acb0682c56 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ 
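The command-pool allocation above moves from kzalloc(n * size) plus an explicit memset to kcalloc(n, size, GFP_KERNEL), which zeroes the buffer and checks the multiplication for overflow. A minimal sketch of the pattern with a hypothetical structure, not driver code:

#include <linux/slab.h>

struct example_node {
        int in_use;
        void *buf;
};

static struct example_node *example_alloc_pool(unsigned int count)
{
        /* was: kzalloc(count * sizeof(struct example_node), GFP_KERNEL)
         * followed by a memset of the same region */
        return kcalloc(count, sizeof(struct example_node), GFP_KERNEL);
}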
b/drivers/net/wireless/mwifiex/fw.h @@ -49,13 +49,23 @@ struct tx_packet_hdr { #define A_SUPPORTED_RATES 9 #define HOSTCMD_SUPPORTED_RATES 14 #define N_SUPPORTED_RATES 3 -#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN) +#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \ + BAND_AN | BAND_GAC | BAND_AAC) -#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11)) +#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \ + BIT(12) | BIT(13)) #define IS_SUPPORT_MULTI_BANDS(adapter) \ (adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT) + +/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14 + * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and bit + * bit 14 for AAC, in order to be compatible with the band capability + * defined in the driver after right shift of 8 bits. + */ #define GET_FW_DEFAULT_BANDS(adapter) \ - ((adapter->fw_cap_info >> 8) & ALL_802_11_BANDS) + (((((adapter->fw_cap_info & 0x3000) << 1) | \ + (adapter->fw_cap_info & ~0xF000)) >> 8) & \ + ALL_802_11_BANDS) #define HostCmd_WEP_KEY_INDEX_MASK 0x3fff @@ -216,6 +226,47 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define LLC_SNAP_LEN 8 +/* HW_SPEC fw_cap_info */ + +#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14))) + +#define GET_VHTCAP_MAXMPDULEN(vht_cap_info) (vht_cap_info & 0x3) +#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3) +#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3) +#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \ + (2 * (nss - 1))) +#define NO_NSS_SUPPORT 0x3 + +/* HW_SPEC: HTC-VHT supported */ +#define ISSUPP_11ACVHTHTCVHT(Dot11acDevCap) (Dot11acDevCap & BIT(22)) +/* HW_SPEC: VHT TXOP PS support */ +#define ISSUPP_11ACVHTTXOPPS(Dot11acDevCap) (Dot11acDevCap & BIT(21)) +/* HW_SPEC: MU RX beamformee support */ +#define ISSUPP_11ACMURXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(20)) +/* HW_SPEC: MU TX beamformee support */ +#define ISSUPP_11ACMUTXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(19)) +/* HW_SPEC: SU Beamformee support */ +#define ISSUPP_11ACSUBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(10)) +/* HW_SPEC: SU Beamformer support */ +#define ISSUPP_11ACSUBEAMFORMER(Dot11acDevCap) (Dot11acDevCap & BIT(9)) +/* HW_SPEC: Rx STBC support */ +#define ISSUPP_11ACRXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(8)) +/* HW_SPEC: Tx STBC support */ +#define ISSUPP_11ACTXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(7)) +/* HW_SPEC: Short GI support for 160MHz BW */ +#define ISSUPP_11ACSGI160(Dot11acDevCap) (Dot11acDevCap & BIT(6)) +/* HW_SPEC: Short GI support for 80MHz BW */ +#define ISSUPP_11ACSGI80(Dot11acDevCap) (Dot11acDevCap & BIT(5)) +/* HW_SPEC: LDPC coding support */ +#define ISSUPP_11ACLDPC(Dot11acDevCap) (Dot11acDevCap & BIT(4)) +/* HW_SPEC: Channel BW 20/40/80/160/80+80 MHz support */ +#define ISSUPP_11ACBW8080(Dot11acDevCap) (Dot11acDevCap & BIT(3)) +/* HW_SPEC: Channel BW 20/40/80/160 MHz support */ +#define ISSUPP_11ACBW160(Dot11acDevCap) (Dot11acDevCap & BIT(2)) + +#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16) +#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF) + #define MOD_CLASS_HR_DSSS 0x03 #define MOD_CLASS_OFDM 0x07 #define MOD_CLASS_HT 0x08 @@ -330,6 +381,9 @@ enum P2P_MODES { #define HOST_SLEEP_CFG_GPIO_DEF 0xff #define HOST_SLEEP_CFG_GAP_DEF 0 +#define MWIFIEX_TIMEOUT_FOR_AP_RESP 0xfffc +#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT 2 + #define CMD_F_HOSTCMD (1 << 0) #define CMD_F_CANCELED (1 
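GET_FW_DEFAULT_BANDS above has to reshuffle the firmware capability word: bits 12 and 13 are moved up by one before the right shift so that, after masking, they land on the new BAND_GAC/BAND_AAC flags while the lower band bits keep their old positions. A standalone sketch of the same expression with a sample capability word:

#include <stdio.h>

enum { BAND_B = 1, BAND_G = 2, BAND_A = 4, BAND_GN = 8,
       BAND_AN = 16, BAND_GAC = 32, BAND_AAC = 64 };
#define EXAMPLE_ALL_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \
                           BAND_AN | BAND_GAC | BAND_AAC)

static unsigned int default_bands(unsigned int fw_cap_info)
{
        return ((((fw_cap_info & 0x3000) << 1) |
                 (fw_cap_info & ~0xF000)) >> 8) & EXAMPLE_ALL_BANDS;
}

int main(void)
{
        /* bits 8-13 set: B/G/A/GN plus the two 11ac capability bits */
        printf("bands = 0x%02x\n", default_bands(0x3f00));
        /* prints 0x6f = B | G | A | GN | GAC | AAC */
        return 0;
}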
<< 1) @@ -452,9 +506,22 @@ struct rxpd { u8 rx_rate; s8 snr; s8 nf; - /* Ht Info [Bit 0] RxRate format: LG=0, HT=1 + + /* For: Non-802.11 AC cards + * + * Ht Info [Bit 0] RxRate format: LG=0, HT=1 * [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1 - * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */ + * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 + * + * For: 802.11 AC cards + * [Bit 1] [Bit 0] RxRate format: legacy rate = 00 HT = 01 VHT = 10 + * [Bit 3] [Bit 2] HT/VHT Bandwidth BW20 = 00 BW40 = 01 + * BW80 = 10 BW160 = 11 + * [Bit 4] HT/VHT Guard interval LGI = 0 SGI = 1 + * [Bit 5] STBC support Enabled = 1 + * [Bit 6] LDPC support Enabled = 1 + * [Bit 7] Reserved + */ u8 ht_info; u8 reserved; } __packed; @@ -677,7 +744,11 @@ struct host_cmd_ds_get_hw_spec { __le32 dot_11n_dev_cap; u8 dev_mcs_support; __le16 mp_end_port; /* SDIO only, reserved for other interfacces */ - __le16 reserved_4; + __le16 mgmt_buf_count; /* mgmt IE buffer count */ + __le32 reserved_5; + __le32 reserved_6; + __le32 dot_11ac_dev_cap; + __le32 dot_11ac_mcs_support; } __packed; struct host_cmd_ds_802_11_rssi_info { @@ -783,6 +854,12 @@ union ieee_types_phy_param_set { struct ieee_types_ds_param_set ds_param_set; } __packed; +struct ieee_types_oper_mode_ntf { + u8 element_id; + u8 len; + u8 oper_mode; +} __packed; + struct host_cmd_ds_802_11_ad_hoc_start { u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 bss_mode; @@ -843,11 +920,27 @@ struct host_cmd_ds_802_11_get_log { __le32 wep_icv_err_cnt[4]; }; +/* Enumeration for rate format */ +enum _mwifiex_rate_format { + MWIFIEX_RATE_FORMAT_LG = 0, + MWIFIEX_RATE_FORMAT_HT, + MWIFIEX_RATE_FORMAT_VHT, + MWIFIEX_RATE_FORMAT_AUTO = 0xFF, +}; + struct host_cmd_ds_tx_rate_query { u8 tx_rate; - /* Ht Info [Bit 0] RxRate format: LG=0, HT=1 + /* Tx Rate Info: For 802.11 AC cards + * + * [Bit 0-1] tx rate formate: LG = 0, HT = 1, VHT = 2 + * [Bit 2-3] HT/VHT Bandwidth: BW20 = 0, BW40 = 1, BW80 = 2, BW160 = 3 + * [Bit 4] HT/VHT Guard Interval: LGI = 0, SGI = 1 + * + * For non-802.11 AC cards + * Ht Info [Bit 0] RxRate format: LG=0, HT=1 * [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1 - * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */ + * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 + */ u8 ht_info; } __packed; @@ -1093,6 +1186,7 @@ struct host_cmd_ds_11n_cfg { __le16 action; __le16 ht_tx_cap; __le16 ht_tx_info; + __le16 misc_config; /* Needed for 802.11AC cards only */ } __packed; struct host_cmd_ds_txbuf_cfg { @@ -1131,12 +1225,6 @@ struct ieee_types_vendor_header { u8 version; } __packed; -struct ieee_types_wmm_ac_parameters { - u8 aci_aifsn_bitmap; - u8 ecw_bitmap; - __le16 tx_op_limit; -} __packed; - struct ieee_types_wmm_parameter { /* * WMM Parameter IE - Vendor Specific Header: @@ -1186,6 +1274,31 @@ struct mwifiex_ie_types_htcap { struct ieee80211_ht_cap ht_cap; } __packed; +struct mwifiex_ie_types_vhtcap { + struct mwifiex_ie_types_header header; + struct ieee80211_vht_cap vht_cap; +} __packed; + +struct mwifiex_ie_types_oper_mode_ntf { + struct mwifiex_ie_types_header header; + u8 oper_mode; +} __packed; + +/* VHT Operations IE */ +struct mwifiex_ie_types_vht_oper { + struct mwifiex_ie_types_header header; + u8 chan_width; + u8 chan_center_freq_1; + u8 chan_center_freq_2; + /* Basic MCS set map, each 2 bits stands for a NSS */ + u16 basic_mcs_map; +} __packed; + +struct mwifiex_ie_types_wmmcap { + struct mwifiex_ie_types_header header; + struct mwifiex_types_wmm_info wmm_info; +} __packed; + struct mwifiex_ie_types_htinfo { struct mwifiex_ie_types_header header; struct ieee80211_ht_operation ht_oper; 
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 39f03ce5a5b1..e38aa9b3663d 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -39,11 +39,8 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv) unsigned long flags; bss_prio = kzalloc(sizeof(struct mwifiex_bss_prio_node), GFP_KERNEL); - if (!bss_prio) { - dev_err(adapter->dev, "%s: failed to alloc bss_prio\n", - __func__); + if (!bss_prio) return -ENOMEM; - } bss_prio->priv = priv; INIT_LIST_HEAD(&bss_prio->list); @@ -317,7 +314,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->pm_wakeup_fw_try = false; - adapter->max_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; @@ -591,6 +587,12 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter) return -1; } } + + if (adapter->if_ops.init_fw_port) { + if (adapter->if_ops.init_fw_port(adapter)) + return -1; + } + for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta); diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index 4e31c6013ebe..d85e6eb1f58a 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -20,7 +20,6 @@ #ifndef _MWIFIEX_IOCTL_H_ #define _MWIFIEX_IOCTL_H_ -#include <net/mac80211.h> #include <net/lib80211.h> enum { @@ -61,6 +60,8 @@ enum { BAND_A = 4, BAND_GN = 8, BAND_AN = 16, + BAND_GAC = 32, + BAND_AAC = 64, }; #define MWIFIEX_WPA_PASSHPHRASE_LEN 64 @@ -104,9 +105,12 @@ struct mwifiex_uap_bss_param { struct wpa_param wpa_cfg; struct wep_key wep_cfg[NUM_WEP_KEYS]; struct ieee80211_ht_cap ht_cap; + struct ieee80211_vht_cap vht_cap; u8 rates[MWIFIEX_SUPPORTED_RATES]; u32 sta_ao_timer; u32 ps_sta_ao_timer; + u8 qos_info; + struct mwifiex_types_wmm_info wmm_info; }; enum { @@ -177,7 +181,6 @@ struct mwifiex_ds_tx_ba_stream_tbl { struct mwifiex_debug_info { u32 int_counter; u32 packets_out[MAX_NUM_TID]; - u32 max_tx_buf_size; u32 tx_buf_size; u32 curr_tx_buf_size; u32 tx_tbl_num; @@ -272,6 +275,7 @@ struct mwifiex_ds_pm_cfg { struct mwifiex_ds_11n_tx_cfg { u16 tx_htcap; u16 tx_htinfo; + u16 misc_config; /* Needed for 802.11AC cards only */ }; struct mwifiex_ds_11n_amsdu_aggr_ctrl { diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 88664ae667ba..246aa62a4817 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -24,6 +24,7 @@ #include "main.h" #include "wmm.h" #include "11n.h" +#include "11ac.h" #define CAPINFO_MASK (~(BIT(15) | BIT(14) | BIT(12) | BIT(11) | BIT(9))) @@ -157,8 +158,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1, memset(rate1, 0, rate1_size); - for (i = 0; rate2[i] && i < rate2_size; i++) { - for (j = 0; tmp[j] && j < rate1_size; j++) { + for (i = 0; i < rate2_size && rate2[i]; i++) { + for (j = 0; j < rate1_size && tmp[j]; j++) { /* Check common rate, excluding the bit for basic rate */ if ((rate2[i] & 0x7F) == (tmp[j] & 0x7F)) { @@ -398,8 +399,6 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, pos = (u8 *) assoc; - mwifiex_cfg_tx_buf(priv, bss_desc); - cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE); /* Save so we know which BSS Desc to use in the response handler */ @@ -514,6 +513,12 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, 
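The mwifiex_get_common_rates() fix above reorders the loop conditions so the array bound is tested before the element is read; with the old order, a rate buffer that has no terminating zero is read one element past its end. A standalone sketch of the corrected pattern:

#include <stddef.h>
#include <stdio.h>

static size_t count_rates(const unsigned char *rates, size_t size)
{
        size_t i;

        /* fixed form: test i < size first, then dereference rates[i] */
        for (i = 0; i < size && rates[i]; i++)
                ;
        return i;
}

int main(void)
{
        unsigned char rates[4] = { 0x82, 0x84, 0x8b, 0x96 };  /* no 0 terminator */

        printf("%zu rates\n", count_rates(rates, sizeof(rates)));  /* 4 */
        return 0;
}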
priv->adapter->config_bands & BAND_AN)) mwifiex_cmd_append_11n_tlv(priv, bss_desc, &pos); + if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) && + !bss_desc->disable_11n && !bss_desc->disable_11ac && + (priv->adapter->config_bands & BAND_GAC || + priv->adapter->config_bands & BAND_AAC)) + mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos); + /* Append vendor specific IE TLV */ mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_ASSOC, &pos); @@ -615,23 +620,33 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct ieee_types_assoc_rsp *assoc_rsp; struct mwifiex_bssdescriptor *bss_desc; u8 enable_data = true; + u16 cap_info, status_code; assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params; + cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap); + status_code = le16_to_cpu(assoc_rsp->status_code); + priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN, sizeof(priv->assoc_rsp_buf)); memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size); - if (le16_to_cpu(assoc_rsp->status_code)) { + if (status_code) { priv->adapter->dbg.num_cmd_assoc_failure++; dev_err(priv->adapter->dev, "ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n", - le16_to_cpu(assoc_rsp->status_code), - le16_to_cpu(assoc_rsp->cap_info_bitmap), - le16_to_cpu(assoc_rsp->a_id)); + status_code, cap_info, le16_to_cpu(assoc_rsp->a_id)); + + if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) { + if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT) + ret = WLAN_STATUS_AUTH_TIMEOUT; + else + ret = WLAN_STATUS_UNSPECIFIED_FAILURE; + } else { + ret = status_code; + } - ret = le16_to_cpu(assoc_rsp->status_code); goto done; } @@ -969,6 +984,16 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, priv->adapter->config_bands); mwifiex_fill_cap_info(priv, radio_type, ht_cap); + if (adapter->sec_chan_offset == + IEEE80211_HT_PARAM_CHA_SEC_NONE) { + u16 tmp_ht_cap; + + tmp_ht_cap = le16_to_cpu(ht_cap->ht_cap.cap_info); + tmp_ht_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + tmp_ht_cap &= ~IEEE80211_HT_CAP_SGI_40; + ht_cap->ht_cap.cap_info = cpu_to_le16(tmp_ht_cap); + } + pos += sizeof(struct mwifiex_ie_types_htcap); cmd_append_size += sizeof(struct mwifiex_ie_types_htcap); @@ -1403,6 +1428,7 @@ mwifiex_band_to_radio_type(u8 band) case BAND_A: case BAND_AN: case BAND_A | BAND_AN: + case BAND_A | BAND_AN | BAND_AAC: return HostCmd_SCAN_RADIO_TYPE_A; case BAND_B: case BAND_G: diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 1b3cfc821940..553adfb0aa81 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -295,6 +295,13 @@ struct mwifiex_bssdescriptor { u16 bss_co_2040_offset; u8 *bcn_ext_cap; u16 ext_cap_offset; + struct ieee80211_vht_cap *bcn_vht_cap; + u16 vht_cap_offset; + struct ieee80211_vht_operation *bcn_vht_oper; + u16 vht_info_offset; + struct ieee_types_oper_mode_ntf *oper_mode; + u16 oper_mode_offset; + u8 disable_11ac; struct ieee_types_vendor_specific *bcn_wpa_ie; u16 wpa_offset; struct ieee_types_generic *bcn_rsn_ie; @@ -499,6 +506,7 @@ struct mwifiex_private { u16 rsn_idx; struct timer_list scan_delay_timer; u8 ap_11n_enabled; + u8 ap_11ac_enabled; u32 mgmt_frame_mask; struct mwifiex_roc_cfg roc_cfg; }; @@ -599,8 +607,10 @@ struct mwifiex_if_ops { int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*init_fw_port) (struct mwifiex_adapter *); int 
(*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); void (*card_reset) (struct mwifiex_adapter *); + int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); }; struct mwifiex_adapter { @@ -629,7 +639,6 @@ struct mwifiex_adapter { /* spin lock for main process */ spinlock_t main_proc_lock; u32 mwifiex_processing; - u16 max_tx_buf_size; u16 tx_buf_size; u16 curr_tx_buf_size; u32 ioport; @@ -721,6 +730,15 @@ struct mwifiex_adapter { u16 max_mgmt_ie_index; u8 scan_delay_cnt; u8 empty_tx_q_cnt; + + /* 11AC */ + u32 is_hw_11ac_capable; + u32 hw_dot_11ac_dev_cap; + u32 hw_dot_11ac_mcs_support; + u32 usr_dot_11ac_dev_cap_bg; + u32 usr_dot_11ac_dev_cap_a; + u32 usr_dot_11ac_mcs_support; + atomic_t is_tx_received; atomic_t pending_bridged_pkts; }; @@ -863,8 +881,10 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd); struct mwifiex_chan_freq_power *mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq); -u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, - u8 ht_info); +u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, + u8 index, u8 ht_info); +u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv, + u8 index, u8 ht_info); u32 mwifiex_find_freq_from_band_chan(u8, u8); int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask, u8 **buffer); @@ -890,6 +910,10 @@ void mwifiex_set_ht_params(struct mwifiex_private *priv, struct cfg80211_ap_settings *params); void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params); +void +mwifiex_set_wmm_params(struct mwifiex_private *priv, + struct mwifiex_uap_bss_param *bss_cfg, + struct cfg80211_ap_settings *params); /* * This function checks if the queuing is RA based or not. diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index b879e1338a54..4b54bcf382f3 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -39,17 +39,20 @@ static struct semaphore add_remove_card_sem; static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter); static int mwifiex_pcie_resume(struct pci_dev *pdev); -/* - * This function is called after skb allocation to update - * "skb->cb" with physical address of data pointer. 
- */ -static phys_addr_t *mwifiex_update_sk_buff_pa(struct sk_buff *skb) +static int +mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, + int size, int flags) { - phys_addr_t *buf_pa = MWIFIEX_SKB_PACB(skb); - - *buf_pa = (phys_addr_t)virt_to_phys(skb->data); + struct pcie_service_card *card = adapter->card; + dma_addr_t buf_pa; - return buf_pa; + buf_pa = pci_map_single(card->dev, skb->data, size, flags); + if (pci_dma_mapping_error(card->dev, buf_pa)) { + dev_err(adapter->dev, "failed to map pci memory!\n"); + return -1; + } + memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t)); + return 0; } /* @@ -59,9 +62,13 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) { u32 *cookie_addr; struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + + if (!reg->sleep_cookie) + return true; - if (card->sleep_cookie) { - cookie_addr = (u32 *)card->sleep_cookie->data; + if (card->sleep_cookie_vbase) { + cookie_addr = (u32 *)card->sleep_cookie_vbase; dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n", *cookie_addr); if (*cookie_addr == FW_AWAKE_COOKIE) @@ -91,6 +98,13 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, card->dev = pdev; + if (ent->driver_data) { + struct mwifiex_pcie_device *data = (void *)ent->driver_data; + card->pcie.firmware = data->firmware; + card->pcie.reg = data->reg; + card->pcie.blksz_fw_dl = data->blksz_fw_dl; + } + if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops, MWIFIEX_PCIE)) { pr_err("%s failed\n", __func__); @@ -227,13 +241,16 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev) return 0; } -#define PCIE_VENDOR_ID_MARVELL (0x11ab) -#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30) - static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = { { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + .driver_data = (unsigned long) &mwifiex_pcie8766, + }, + { + PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + .driver_data = (unsigned long) &mwifiex_pcie8897, }, {}, }; @@ -286,8 +303,10 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { int i = 0; + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - while (mwifiex_pcie_ok_to_access_hw(adapter)) { + while (reg->sleep_cookie && mwifiex_pcie_ok_to_access_hw(adapter)) { i++; usleep_range(10, 20); /* 50ms max wait */ @@ -361,14 +380,246 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter) } /* - * This function creates buffer descriptor ring for TX + * This function initializes TX buffer ring descriptors */ -static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) +static int mwifiex_init_txq_ring(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + struct mwifiex_pcie_buf_desc *desc; + struct mwifiex_pfu_buf_desc *desc2; + int i; + + for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { + card->tx_buf_list[i] = NULL; + if (reg->pfu_enabled) { + card->txbd_ring[i] = (void *)card->txbd_ring_vbase + + (sizeof(*desc2) * i); + desc2 = card->txbd_ring[i]; + memset(desc2, 0, sizeof(*desc2)); + } else { + card->txbd_ring[i] = (void *)card->txbd_ring_vbase + + (sizeof(*desc) * i); + desc = card->txbd_ring[i]; + memset(desc, 0, sizeof(*desc)); + } + } + + return 0; +} + +/* 
This function initializes RX buffer ring descriptors. Each SKB is allocated + * here and after mapping PCI memory, its physical address is assigned to + * PCIE Rx buffer descriptor's physical address. + */ +static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + struct sk_buff *skb; + struct mwifiex_pcie_buf_desc *desc; + struct mwifiex_pfu_buf_desc *desc2; + dma_addr_t buf_pa; + int i; + + for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { + /* Allocate skb here so that firmware can DMA data from it */ + skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE); + if (!skb) { + dev_err(adapter->dev, + "Unable to allocate skb for RX ring.\n"); + kfree(card->rxbd_ring_vbase); + return -ENOMEM; + } + + if (mwifiex_map_pci_memory(adapter, skb, + MWIFIEX_RX_DATA_BUF_SIZE, + PCI_DMA_FROMDEVICE)) + return -1; + + MWIFIEX_SKB_PACB(skb, &buf_pa); + + dev_dbg(adapter->dev, + "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", + skb, skb->len, skb->data, (u32)buf_pa, + (u32)((u64)buf_pa >> 32)); + + card->rx_buf_list[i] = skb; + if (reg->pfu_enabled) { + card->rxbd_ring[i] = (void *)card->rxbd_ring_vbase + + (sizeof(*desc2) * i); + desc2 = card->rxbd_ring[i]; + desc2->paddr = buf_pa; + desc2->len = (u16)skb->len; + desc2->frag_len = (u16)skb->len; + desc2->flags = reg->ring_flag_eop | reg->ring_flag_sop; + desc2->offset = 0; + } else { + card->rxbd_ring[i] = (void *)(card->rxbd_ring_vbase + + (sizeof(*desc) * i)); + desc = card->rxbd_ring[i]; + desc->paddr = buf_pa; + desc->len = (u16)skb->len; + desc->flags = 0; + } + } + + return 0; +} + +/* This function initializes event buffer ring descriptors. Each SKB is + * allocated here and after mapping PCI memory, its physical address is assigned + * to PCIE Rx buffer descriptor's physical address + */ +static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + struct mwifiex_evt_buf_desc *desc; + struct sk_buff *skb; + dma_addr_t buf_pa; + int i; + + for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) { + /* Allocate skb here so that firmware can DMA data from it */ + skb = dev_alloc_skb(MAX_EVENT_SIZE); + if (!skb) { + dev_err(adapter->dev, + "Unable to allocate skb for EVENT buf.\n"); + kfree(card->evtbd_ring_vbase); + return -ENOMEM; + } + skb_put(skb, MAX_EVENT_SIZE); + + if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, + PCI_DMA_FROMDEVICE)) + return -1; + + MWIFIEX_SKB_PACB(skb, &buf_pa); + + dev_dbg(adapter->dev, + "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", + skb, skb->len, skb->data, (u32)buf_pa, + (u32)((u64)buf_pa >> 32)); + + card->evt_buf_list[i] = skb; + card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase + + (sizeof(*desc) * i)); + desc = card->evtbd_ring[i]; + desc->paddr = buf_pa; + desc->len = (u16)skb->len; + desc->flags = 0; + } + + return 0; +} + +/* This function cleans up TX buffer rings. If any of the buffer list has valid + * SKB address, associated SKB is freed. 
+ */ +static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + struct sk_buff *skb; + struct mwifiex_pcie_buf_desc *desc; + struct mwifiex_pfu_buf_desc *desc2; + int i; + + for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { + if (reg->pfu_enabled) { + desc2 = card->txbd_ring[i]; + if (card->tx_buf_list[i]) { + skb = card->tx_buf_list[i]; + pci_unmap_single(card->dev, desc2->paddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + } + memset(desc2, 0, sizeof(*desc2)); + } else { + desc = card->txbd_ring[i]; + if (card->tx_buf_list[i]) { + skb = card->tx_buf_list[i]; + pci_unmap_single(card->dev, desc->paddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + } + memset(desc, 0, sizeof(*desc)); + } + card->tx_buf_list[i] = NULL; + } + + return; +} + +/* This function cleans up RX buffer rings. If any of the buffer list has valid + * SKB address, associated SKB is freed. + */ +static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + struct mwifiex_pcie_buf_desc *desc; + struct mwifiex_pfu_buf_desc *desc2; struct sk_buff *skb; int i; - phys_addr_t *buf_pa; + + for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { + if (reg->pfu_enabled) { + desc2 = card->rxbd_ring[i]; + if (card->rx_buf_list[i]) { + skb = card->rx_buf_list[i]; + pci_unmap_single(card->dev, desc2->paddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + } + memset(desc2, 0, sizeof(*desc2)); + } else { + desc = card->rxbd_ring[i]; + if (card->rx_buf_list[i]) { + skb = card->rx_buf_list[i]; + pci_unmap_single(card->dev, desc->paddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + } + memset(desc, 0, sizeof(*desc)); + } + card->rx_buf_list[i] = NULL; + } + + return; +} + +/* This function cleans up event buffer rings. If any of the buffer list has + * valid SKB address, associated SKB is freed. 
+ */ +static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + struct mwifiex_evt_buf_desc *desc; + struct sk_buff *skb; + int i; + + for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) { + desc = card->evtbd_ring[i]; + if (card->evt_buf_list[i]) { + skb = card->evt_buf_list[i]; + pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + } + card->evt_buf_list[i] = NULL; + memset(desc, 0, sizeof(*desc)); + } + + return; +} + +/* This function creates buffer descriptor ring for TX + */ +static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; /* * driver maintaines the write pointer and firmware maintaines the read @@ -376,76 +627,56 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) * starts at zero with rollover bit set */ card->txbd_wrptr = 0; - card->txbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND; + + if (reg->pfu_enabled) + card->txbd_rdptr = 0; + else + card->txbd_rdptr |= reg->tx_rollover_ind; /* allocate shared memory for the BD ring and divide the same in to several descriptors */ - card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * - MWIFIEX_MAX_TXRX_BD; + if (reg->pfu_enabled) + card->txbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) * + MWIFIEX_MAX_TXRX_BD; + else + card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * + MWIFIEX_MAX_TXRX_BD; + dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n", card->txbd_ring_size); - card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL); + card->txbd_ring_vbase = pci_alloc_consistent(card->dev, + card->txbd_ring_size, + &card->txbd_ring_pbase); if (!card->txbd_ring_vbase) { - dev_err(adapter->dev, "Unable to alloc buffer for txbd ring\n"); + dev_err(adapter->dev, + "allocate consistent memory (%d bytes) failed!\n", + card->txbd_ring_size); return -ENOMEM; } - card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase); - dev_dbg(adapter->dev, "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n", - card->txbd_ring_vbase, (u32)card->txbd_ring_pbase, + card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase, (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size); - for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { - card->txbd_ring[i] = (struct mwifiex_pcie_buf_desc *) - (card->txbd_ring_vbase + - (sizeof(struct mwifiex_pcie_buf_desc) - * i)); - - /* Allocate buffer here so that firmware can DMA data from it */ - skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE); - if (!skb) { - dev_err(adapter->dev, "Unable to allocate skb for TX ring.\n"); - kfree(card->txbd_ring_vbase); - return -ENOMEM; - } - buf_pa = mwifiex_update_sk_buff_pa(skb); - - skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE); - dev_dbg(adapter->dev, "info: TX ring: add new skb base: %p, " - "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n", - skb, skb->data, (u32)*buf_pa, - (u32)(((u64)*buf_pa >> 32)), skb->len); - - card->tx_buf_list[i] = skb; - card->txbd_ring[i]->paddr = *buf_pa; - card->txbd_ring[i]->len = (u16)skb->len; - card->txbd_ring[i]->flags = 0; - } - - return 0; + return mwifiex_init_txq_ring(adapter); } static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - int i; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { - if (card->tx_buf_list[i]) - 
dev_kfree_skb_any(card->tx_buf_list[i]); - card->tx_buf_list[i] = NULL; - card->txbd_ring[i]->paddr = 0; - card->txbd_ring[i]->len = 0; - card->txbd_ring[i]->flags = 0; - card->txbd_ring[i] = NULL; - } + mwifiex_cleanup_txq_ring(adapter); - kfree(card->txbd_ring_vbase); + if (card->txbd_ring_vbase) + pci_free_consistent(card->dev, card->txbd_ring_size, + card->txbd_ring_vbase, + card->txbd_ring_pbase); card->txbd_ring_size = 0; card->txbd_wrptr = 0; - card->txbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND; + card->txbd_rdptr = 0 | reg->tx_rollover_ind; card->txbd_ring_vbase = NULL; + card->txbd_ring_pbase = 0; return 0; } @@ -456,9 +687,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter) static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - struct sk_buff *skb; - int i; - phys_addr_t *buf_pa; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; /* * driver maintaines the read pointer and firmware maintaines the write @@ -466,19 +695,26 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) * starts at zero with rollover bit set */ card->rxbd_wrptr = 0; - card->rxbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND; + card->rxbd_rdptr = reg->rx_rollover_ind; + + if (reg->pfu_enabled) + card->rxbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) * + MWIFIEX_MAX_TXRX_BD; + else + card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * + MWIFIEX_MAX_TXRX_BD; - card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * - MWIFIEX_MAX_TXRX_BD; dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n", card->rxbd_ring_size); - card->rxbd_ring_vbase = kzalloc(card->rxbd_ring_size, GFP_KERNEL); + card->rxbd_ring_vbase = pci_alloc_consistent(card->dev, + card->rxbd_ring_size, + &card->rxbd_ring_pbase); if (!card->rxbd_ring_vbase) { - dev_err(adapter->dev, "Unable to allocate buffer for " - "rxbd_ring.\n"); + dev_err(adapter->dev, + "allocate consistent memory (%d bytes) failed!\n", + card->rxbd_ring_size); return -ENOMEM; } - card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase); dev_dbg(adapter->dev, "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n", @@ -486,35 +722,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) (u32)((u64)card->rxbd_ring_pbase >> 32), card->rxbd_ring_size); - for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { - card->rxbd_ring[i] = (struct mwifiex_pcie_buf_desc *) - (card->rxbd_ring_vbase + - (sizeof(struct mwifiex_pcie_buf_desc) - * i)); - - /* Allocate skb here so that firmware can DMA data from it */ - skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE); - if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for RX ring.\n"); - kfree(card->rxbd_ring_vbase); - return -ENOMEM; - } - buf_pa = mwifiex_update_sk_buff_pa(skb); - skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE); - - dev_dbg(adapter->dev, "info: RX ring: add new skb base: %p, " - "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n", - skb, skb->data, (u32)*buf_pa, (u32)((u64)*buf_pa >> 32), - skb->len); - - card->rx_buf_list[i] = skb; - card->rxbd_ring[i]->paddr = *buf_pa; - card->rxbd_ring[i]->len = (u16)skb->len; - card->rxbd_ring[i]->flags = 0; - } - - return 0; + return mwifiex_init_rxq_ring(adapter); } /* @@ -523,23 +731,19 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - int i; + const struct 
mwifiex_pcie_card_reg *reg = card->pcie.reg; - for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { - if (card->rx_buf_list[i]) - dev_kfree_skb_any(card->rx_buf_list[i]); - card->rx_buf_list[i] = NULL; - card->rxbd_ring[i]->paddr = 0; - card->rxbd_ring[i]->len = 0; - card->rxbd_ring[i]->flags = 0; - card->rxbd_ring[i] = NULL; - } + mwifiex_cleanup_rxq_ring(adapter); - kfree(card->rxbd_ring_vbase); + if (card->rxbd_ring_vbase) + pci_free_consistent(card->dev, card->rxbd_ring_size, + card->rxbd_ring_vbase, + card->rxbd_ring_pbase); card->rxbd_ring_size = 0; card->rxbd_wrptr = 0; - card->rxbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND; + card->rxbd_rdptr = 0 | reg->rx_rollover_ind; card->rxbd_ring_vbase = NULL; + card->rxbd_ring_pbase = 0; return 0; } @@ -550,9 +754,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter) static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - struct sk_buff *skb; - int i; - phys_addr_t *buf_pa; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; /* * driver maintaines the read pointer and firmware maintaines the write @@ -560,19 +762,22 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) * starts at zero with rollover bit set */ card->evtbd_wrptr = 0; - card->evtbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND; + card->evtbd_rdptr = reg->evt_rollover_ind; + + card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) * + MWIFIEX_MAX_EVT_BD; - card->evtbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * - MWIFIEX_MAX_EVT_BD; dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n", card->evtbd_ring_size); - card->evtbd_ring_vbase = kzalloc(card->evtbd_ring_size, GFP_KERNEL); + card->evtbd_ring_vbase = pci_alloc_consistent(card->dev, + card->evtbd_ring_size, + &card->evtbd_ring_pbase); if (!card->evtbd_ring_vbase) { dev_err(adapter->dev, - "Unable to allocate buffer. Terminating download\n"); + "allocate consistent memory (%d bytes) failed!\n", + card->evtbd_ring_size); return -ENOMEM; } - card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase); dev_dbg(adapter->dev, "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n", @@ -580,35 +785,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) (u32)((u64)card->evtbd_ring_pbase >> 32), card->evtbd_ring_size); - for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) { - card->evtbd_ring[i] = (struct mwifiex_pcie_buf_desc *) - (card->evtbd_ring_vbase + - (sizeof(struct mwifiex_pcie_buf_desc) - * i)); - - /* Allocate skb here so that firmware can DMA data from it */ - skb = dev_alloc_skb(MAX_EVENT_SIZE); - if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for EVENT buf.\n"); - kfree(card->evtbd_ring_vbase); - return -ENOMEM; - } - buf_pa = mwifiex_update_sk_buff_pa(skb); - skb_put(skb, MAX_EVENT_SIZE); - - dev_dbg(adapter->dev, "info: Evt ring: add new skb. 
base: %p, " - "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n", - skb, skb->data, (u32)*buf_pa, (u32)((u64)*buf_pa >> 32), - skb->len); - - card->evt_buf_list[i] = skb; - card->evtbd_ring[i]->paddr = *buf_pa; - card->evtbd_ring[i]->len = (u16)skb->len; - card->evtbd_ring[i]->flags = 0; - } - - return 0; + return mwifiex_pcie_init_evt_ring(adapter); } /* @@ -617,23 +794,19 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - int i; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) { - if (card->evt_buf_list[i]) - dev_kfree_skb_any(card->evt_buf_list[i]); - card->evt_buf_list[i] = NULL; - card->evtbd_ring[i]->paddr = 0; - card->evtbd_ring[i]->len = 0; - card->evtbd_ring[i]->flags = 0; - card->evtbd_ring[i] = NULL; - } + mwifiex_cleanup_evt_ring(adapter); - kfree(card->evtbd_ring_vbase); + if (card->evtbd_ring_vbase) + pci_free_consistent(card->dev, card->evtbd_ring_size, + card->evtbd_ring_vbase, + card->evtbd_ring_pbase); card->evtbd_wrptr = 0; - card->evtbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND; + card->evtbd_rdptr = 0 | reg->evt_rollover_ind; card->evtbd_ring_size = 0; card->evtbd_ring_vbase = NULL; + card->evtbd_ring_pbase = 0; return 0; } @@ -653,21 +826,12 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) "Unable to allocate skb for command response data.\n"); return -ENOMEM; } - mwifiex_update_sk_buff_pa(skb); skb_put(skb, MWIFIEX_UPLD_SIZE); - card->cmdrsp_buf = skb; + if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE)) + return -1; - skb = NULL; - /* Allocate memory for sending command to firmware */ - skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER); - if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for command data.\n"); - return -ENOMEM; - } - mwifiex_update_sk_buff_pa(skb); - skb_put(skb, MWIFIEX_SIZE_OF_CMD_BUFFER); - card->cmd_buf = skb; + card->cmdrsp_buf = skb; return 0; } @@ -678,18 +842,26 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card; + dma_addr_t buf_pa; if (!adapter) return 0; card = adapter->card; - if (card && card->cmdrsp_buf) + if (card && card->cmdrsp_buf) { + MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(card->cmdrsp_buf); + } - if (card && card->cmd_buf) + if (card && card->cmd_buf) { + MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_SIZE_OF_CMD_BUFFER, + PCI_DMA_TODEVICE); dev_kfree_skb_any(card->cmd_buf); - + } return 0; } @@ -698,27 +870,19 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) */ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) { - struct sk_buff *skb; struct pcie_service_card *card = adapter->card; - /* Allocate memory for sleep cookie */ - skb = dev_alloc_skb(sizeof(u32)); - if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for sleep cookie!\n"); + card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32), + &card->sleep_cookie_pbase); + if (!card->sleep_cookie_vbase) { + dev_err(adapter->dev, "pci_alloc_consistent failed!\n"); return -ENOMEM; } - mwifiex_update_sk_buff_pa(skb); - skb_put(skb, sizeof(u32)); - /* Init 
val of Sleep Cookie */ - *(u32 *)skb->data = FW_AWAKE_COOKIE; + *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE; dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n", - *((u32 *)skb->data)); - - /* Save the sleep cookie */ - card->sleep_cookie = skb; + *((u32 *)card->sleep_cookie_vbase)); return 0; } @@ -735,86 +899,246 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter) card = adapter->card; - if (card && card->sleep_cookie) { - dev_kfree_skb_any(card->sleep_cookie); - card->sleep_cookie = NULL; + if (card && card->sleep_cookie_vbase) { + pci_free_consistent(card->dev, sizeof(u32), + card->sleep_cookie_vbase, + card->sleep_cookie_pbase); + card->sleep_cookie_vbase = NULL; } return 0; } +/* This function flushes the TX buffer descriptor ring + * This function defined as handler is also called while cleaning TXRX + * during disconnect/ bss stop. + */ +static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + u32 rdptr; + + /* Read the TX ring read pointer set by firmware */ + if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) { + dev_err(adapter->dev, + "Flush TXBD: failed to read reg->tx_rdptr\n"); + return -1; + } + + if (!mwifiex_pcie_txbd_empty(card, rdptr)) { + card->txbd_flush = 1; + /* write pointer already set at last send + * send dnld-rdy intr again, wait for completion. + */ + if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY)) { + dev_err(adapter->dev, + "failed to assert dnld-rdy interrupt.\n"); + return -1; + } + } + return 0; +} + /* - * This function sends data buffer to device + * This function unmaps and frees downloaded data buffer */ -static int -mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb) +static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) { + struct sk_buff *skb; + dma_addr_t buf_pa; + u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0; + struct mwifiex_pcie_buf_desc *desc; + struct mwifiex_pfu_buf_desc *desc2; struct pcie_service_card *card = adapter->card; - u32 wrindx, rdptr; - phys_addr_t *buf_pa; - __le16 *tmp; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!mwifiex_pcie_ok_to_access_hw(adapter)) mwifiex_pm_wakeup_card(adapter); /* Read the TX ring read pointer set by firmware */ - if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) { + if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) { dev_err(adapter->dev, - "SEND DATA: failed to read REG_TXBD_RDPTR\n"); + "SEND COMP: failed to read reg->tx_rdptr\n"); return -1; } - wrindx = card->txbd_wrptr & MWIFIEX_TXBD_MASK; + dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n", + card->txbd_rdptr, rdptr); - dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", rdptr, - card->txbd_wrptr); - if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) != - (rdptr & MWIFIEX_TXBD_MASK)) || - ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) != - (rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) { - struct sk_buff *skb_data; + num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; + /* free from previous txbd_rdptr to current txbd_rdptr */ + while (((card->txbd_rdptr & reg->tx_mask) != + (rdptr & reg->tx_mask)) || + ((card->txbd_rdptr & reg->tx_rollover_ind) != + (rdptr & reg->tx_rollover_ind))) { + wrdoneidx = (card->txbd_rdptr & reg->tx_mask) >> + reg->tx_start_ptr; + + skb = card->tx_buf_list[wrdoneidx]; + if (skb) { + dev_dbg(adapter->dev, + "SEND COMP: 
Detach skb %p at txbd_rdidx=%d\n", + skb, wrdoneidx); + MWIFIEX_SKB_PACB(skb, &buf_pa); + pci_unmap_single(card->dev, buf_pa, skb->len, + PCI_DMA_TODEVICE); + + unmap_count++; + + if (card->txbd_flush) + mwifiex_write_data_complete(adapter, skb, 0, + -1); + else + mwifiex_write_data_complete(adapter, skb, 0, 0); + } + + card->tx_buf_list[wrdoneidx] = NULL; + + if (reg->pfu_enabled) { + desc2 = (void *)card->txbd_ring[wrdoneidx]; + memset(desc2, 0, sizeof(*desc2)); + } else { + desc = card->txbd_ring[wrdoneidx]; + memset(desc, 0, sizeof(*desc)); + } + switch (card->dev->device) { + case PCIE_DEVICE_ID_MARVELL_88W8766P: + card->txbd_rdptr++; + break; + case PCIE_DEVICE_ID_MARVELL_88W8897: + card->txbd_rdptr += reg->ring_tx_start_ptr; + break; + } + + + if ((card->txbd_rdptr & reg->tx_mask) == num_tx_buffs) + card->txbd_rdptr = ((card->txbd_rdptr & + reg->tx_rollover_ind) ^ + reg->tx_rollover_ind); + } + + if (unmap_count) + adapter->data_sent = false; + + if (card->txbd_flush) { + if (mwifiex_pcie_txbd_empty(card, card->txbd_rdptr)) + card->txbd_flush = 0; + else + mwifiex_clean_pcie_ring_buf(adapter); + } + + return 0; +} + +/* This function sends data buffer to device. First 4 bytes of payload + * are filled with payload length and payload type. Then this payload + * is mapped to PCI device memory. Tx ring pointers are advanced accordingly. + * Download ready interrupt to FW is deffered if Tx ring is not full and + * additional payload can be accomodated. + */ +static int +mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, + struct mwifiex_tx_param *tx_param) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + u32 wrindx, num_tx_buffs, rx_val; + int ret; + dma_addr_t buf_pa; + struct mwifiex_pcie_buf_desc *desc; + struct mwifiex_pfu_buf_desc *desc2; + __le16 *tmp; + + if (!(skb->data && skb->len)) { + dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n", + __func__, skb->data, skb->len); + return -1; + } + + if (!mwifiex_pcie_ok_to_access_hw(adapter)) + mwifiex_pm_wakeup_card(adapter); + + num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; + dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", + card->txbd_rdptr, card->txbd_wrptr); + if (mwifiex_pcie_txbd_not_full(card)) { u8 *payload; adapter->data_sent = true; - skb_data = card->tx_buf_list[wrindx]; - memcpy(skb_data->data, skb->data, skb->len); - payload = skb_data->data; + payload = skb->data; tmp = (__le16 *)&payload[0]; *tmp = cpu_to_le16((u16)skb->len); tmp = (__le16 *)&payload[2]; *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA); - skb_put(skb_data, MWIFIEX_RX_DATA_BUF_SIZE - skb_data->len); - skb_trim(skb_data, skb->len); - buf_pa = MWIFIEX_SKB_PACB(skb_data); - card->txbd_ring[wrindx]->paddr = *buf_pa; - card->txbd_ring[wrindx]->len = (u16)skb_data->len; - card->txbd_ring[wrindx]->flags = MWIFIEX_BD_FLAG_FIRST_DESC | - MWIFIEX_BD_FLAG_LAST_DESC; - - if ((++card->txbd_wrptr & MWIFIEX_TXBD_MASK) == - MWIFIEX_MAX_TXRX_BD) - card->txbd_wrptr = ((card->txbd_wrptr & - MWIFIEX_BD_FLAG_ROLLOVER_IND) ^ - MWIFIEX_BD_FLAG_ROLLOVER_IND); - /* Write the TX ring write pointer in to REG_TXBD_WRPTR */ - if (mwifiex_write_reg(adapter, REG_TXBD_WRPTR, - card->txbd_wrptr)) { - dev_err(adapter->dev, - "SEND DATA: failed to write REG_TXBD_WRPTR\n"); - return 0; + if (mwifiex_map_pci_memory(adapter, skb, skb->len , + PCI_DMA_TODEVICE)) + return -1; + + wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr; + MWIFIEX_SKB_PACB(skb, &buf_pa); + 
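For reference, a minimal sketch of the map-and-stash pattern the reworked PCIe data path relies on: pci_map_single() replaces the old virt_to_phys() scheme, and the returned DMA address is stored in skb->cb so it can be fetched later (via MWIFIEX_SKB_PACB) when filling ring descriptors or unmapping on completion. Helper names below are illustrative, not the driver's.

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Map skb->data for streaming DMA and stash the bus address in skb->cb. */
static int example_map_skb(struct pci_dev *pdev, struct sk_buff *skb,
			   int size, int direction)
{
	dma_addr_t pa = pci_map_single(pdev, skb->data, size, direction);

	if (pci_dma_mapping_error(pdev, pa))
		return -1;			/* caller treats this as a hard error */
	memcpy(skb->cb, &pa, sizeof(pa));	/* later read back for desc->paddr */
	return 0;
}

/* Read the stashed bus address back, e.g. before pci_unmap_single(). */
static void example_skb_pa(struct sk_buff *skb, dma_addr_t *pa)
{
	memcpy(pa, skb->cb, sizeof(*pa));
}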
card->tx_buf_list[wrindx] = skb; + + if (reg->pfu_enabled) { + desc2 = (void *)card->txbd_ring[wrindx]; + desc2->paddr = buf_pa; + desc2->len = (u16)skb->len; + desc2->frag_len = (u16)skb->len; + desc2->offset = 0; + desc2->flags = MWIFIEX_BD_FLAG_FIRST_DESC | + MWIFIEX_BD_FLAG_LAST_DESC; + } else { + desc = card->txbd_ring[wrindx]; + desc->paddr = buf_pa; + desc->len = (u16)skb->len; + desc->flags = MWIFIEX_BD_FLAG_FIRST_DESC | + MWIFIEX_BD_FLAG_LAST_DESC; } - /* Send the TX ready interrupt */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DNLD_RDY)) { + switch (card->dev->device) { + case PCIE_DEVICE_ID_MARVELL_88W8766P: + card->txbd_wrptr++; + break; + case PCIE_DEVICE_ID_MARVELL_88W8897: + card->txbd_wrptr += reg->ring_tx_start_ptr; + break; + } + + if ((card->txbd_wrptr & reg->tx_mask) == num_tx_buffs) + card->txbd_wrptr = ((card->txbd_wrptr & + reg->tx_rollover_ind) ^ + reg->tx_rollover_ind); + + rx_val = card->rxbd_rdptr & reg->rx_wrap_mask; + /* Write the TX ring write pointer in to reg->tx_wrptr */ + if (mwifiex_write_reg(adapter, reg->tx_wrptr, + card->txbd_wrptr | rx_val)) { dev_err(adapter->dev, - "SEND DATA: failed to assert door-bell intr\n"); - return -1; + "SEND DATA: failed to write reg->tx_wrptr\n"); + ret = -1; + goto done_unmap; + } + if ((mwifiex_pcie_txbd_not_full(card)) && + tx_param->next_pkt_len) { + /* have more packets and TxBD still can hold more */ + dev_dbg(adapter->dev, + "SEND DATA: delay dnld-rdy interrupt.\n"); + adapter->data_sent = false; + } else { + /* Send the TX ready interrupt */ + if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY)) { + dev_err(adapter->dev, + "SEND DATA: failed to assert dnld-rdy interrupt.\n"); + ret = -1; + goto done_unmap; + } } dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: " "%#x> and sent packet to firmware successfully\n", - rdptr, card->txbd_wrptr); + card->txbd_rdptr, card->txbd_wrptr); } else { dev_dbg(adapter->dev, "info: TX Ring full, can't send packets to fw\n"); @@ -827,7 +1151,17 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb) return -EBUSY; } - return 0; + return -EINPROGRESS; +done_unmap: + MWIFIEX_SKB_PACB(skb, &buf_pa); + pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE); + card->tx_buf_list[wrindx] = NULL; + if (reg->pfu_enabled) + memset(desc2, 0, sizeof(*desc2)); + else + memset(desc, 0, sizeof(*desc)); + + return ret; } /* @@ -837,78 +1171,119 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb) static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - u32 wrptr, rd_index; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + u32 wrptr, rd_index, tx_val; + dma_addr_t buf_pa; int ret = 0; struct sk_buff *skb_tmp = NULL; + struct mwifiex_pcie_buf_desc *desc; + struct mwifiex_pfu_buf_desc *desc2; + + if (!mwifiex_pcie_ok_to_access_hw(adapter)) + mwifiex_pm_wakeup_card(adapter); /* Read the RX ring Write pointer set by firmware */ - if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) { + if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { dev_err(adapter->dev, - "RECV DATA: failed to read REG_TXBD_RDPTR\n"); + "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } + card->rxbd_wrptr = wrptr; - while (((wrptr & MWIFIEX_RXBD_MASK) != - (card->rxbd_rdptr & MWIFIEX_RXBD_MASK)) || - ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) == - (card->rxbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) { + while (((wrptr & 
reg->rx_mask) != + (card->rxbd_rdptr & reg->rx_mask)) || + ((wrptr & reg->rx_rollover_ind) == + (card->rxbd_rdptr & reg->rx_rollover_ind))) { struct sk_buff *skb_data; u16 rx_len; + __le16 pkt_len; - rd_index = card->rxbd_rdptr & MWIFIEX_RXBD_MASK; + rd_index = card->rxbd_rdptr & reg->rx_mask; skb_data = card->rx_buf_list[rd_index]; + MWIFIEX_SKB_PACB(skb_data, &buf_pa); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, + PCI_DMA_FROMDEVICE); + card->rx_buf_list[rd_index] = NULL; + /* Get data length from interface header - - first byte is len, second byte is type */ - rx_len = *((u16 *)skb_data->data); + * first 2 bytes for len, next 2 bytes is for type + */ + pkt_len = *((__le16 *)skb_data->data); + rx_len = le16_to_cpu(pkt_len); + skb_put(skb_data, rx_len); dev_dbg(adapter->dev, "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", card->rxbd_rdptr, wrptr, rx_len); - skb_tmp = dev_alloc_skb(rx_len); + skb_pull(skb_data, INTF_HEADER_LEN); + mwifiex_handle_rx_packet(adapter, skb_data); + + skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE); if (!skb_tmp) { - dev_dbg(adapter->dev, - "info: Failed to alloc skb for RX\n"); - ret = -EBUSY; - goto done; + dev_err(adapter->dev, + "Unable to allocate skb.\n"); + return -ENOMEM; } - skb_put(skb_tmp, rx_len); + if (mwifiex_map_pci_memory(adapter, skb_tmp, + MWIFIEX_RX_DATA_BUF_SIZE, + PCI_DMA_FROMDEVICE)) + return -1; + + MWIFIEX_SKB_PACB(skb_tmp, &buf_pa); + + dev_dbg(adapter->dev, + "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n", + skb_tmp, rd_index); + card->rx_buf_list[rd_index] = skb_tmp; + + if (reg->pfu_enabled) { + desc2 = (void *)card->rxbd_ring[rd_index]; + desc2->paddr = buf_pa; + desc2->len = skb_tmp->len; + desc2->frag_len = skb_tmp->len; + desc2->offset = 0; + desc2->flags = reg->ring_flag_sop | reg->ring_flag_eop; + } else { + desc = card->rxbd_ring[rd_index]; + desc->paddr = buf_pa; + desc->len = skb_tmp->len; + desc->flags = 0; + } - memcpy(skb_tmp->data, skb_data->data + INTF_HEADER_LEN, rx_len); - if ((++card->rxbd_rdptr & MWIFIEX_RXBD_MASK) == + if ((++card->rxbd_rdptr & reg->rx_mask) == MWIFIEX_MAX_TXRX_BD) { card->rxbd_rdptr = ((card->rxbd_rdptr & - MWIFIEX_BD_FLAG_ROLLOVER_IND) ^ - MWIFIEX_BD_FLAG_ROLLOVER_IND); + reg->rx_rollover_ind) ^ + reg->rx_rollover_ind); } dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n", card->rxbd_rdptr, wrptr); - /* Write the RX ring read pointer in to REG_RXBD_RDPTR */ - if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR, - card->rxbd_rdptr)) { + tx_val = card->txbd_wrptr & reg->tx_wrap_mask; + /* Write the RX ring read pointer in to reg->rx_rdptr */ + if (mwifiex_write_reg(adapter, reg->rx_rdptr, + card->rxbd_rdptr | tx_val)) { dev_err(adapter->dev, - "RECV DATA: failed to write REG_RXBD_RDPTR\n"); + "RECV DATA: failed to write reg->rx_rdptr\n"); ret = -1; goto done; } /* Read the RX ring Write pointer set by firmware */ - if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) { + if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { dev_err(adapter->dev, - "RECV DATA: failed to read REG_TXBD_RDPTR\n"); + "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } dev_dbg(adapter->dev, "info: RECV DATA: Rcvd packet from fw successfully\n"); - mwifiex_handle_rx_packet(adapter, skb_tmp); + card->rxbd_wrptr = wrptr; } done: - if (ret && skb_tmp) - dev_kfree_skb_any(skb_tmp); return ret; } @@ -918,40 +1293,54 @@ done: static int mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) { - phys_addr_t *buf_pa = MWIFIEX_SKB_PACB(skb); + dma_addr_t 
buf_pa; + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - if (!(skb->data && skb->len && *buf_pa)) { + if (!(skb->data && skb->len)) { dev_err(adapter->dev, - "Invalid parameter in %s <%p, %#x:%x, %x>\n", - __func__, skb->data, skb->len, - (u32)*buf_pa, (u32)((u64)*buf_pa >> 32)); + "Invalid parameter in %s <%p. len %d>\n", + __func__, skb->data, skb->len); return -1; } - /* Write the lower 32bits of the physical address to scratch - * register 0 */ - if (mwifiex_write_reg(adapter, PCIE_SCRATCH_0_REG, (u32)*buf_pa)) { + if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE)) + return -1; + + MWIFIEX_SKB_PACB(skb, &buf_pa); + + /* Write the lower 32bits of the physical address to low command + * address scratch register + */ + if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) { dev_err(adapter->dev, "%s: failed to write download command to boot code.\n", __func__); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, + PCI_DMA_TODEVICE); return -1; } - /* Write the upper 32bits of the physical address to scratch - * register 1 */ - if (mwifiex_write_reg(adapter, PCIE_SCRATCH_1_REG, - (u32)((u64)*buf_pa >> 32))) { + /* Write the upper 32bits of the physical address to high command + * address scratch register + */ + if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, + (u32)((u64)buf_pa >> 32))) { dev_err(adapter->dev, "%s: failed to write download command to boot code.\n", __func__); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, + PCI_DMA_TODEVICE); return -1; } - /* Write the command length to scratch register 2 */ - if (mwifiex_write_reg(adapter, PCIE_SCRATCH_2_REG, skb->len)) { + /* Write the command length to cmd_size scratch register */ + if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) { dev_err(adapter->dev, - "%s: failed to write command len to scratch reg 2\n", + "%s: failed to write command len to cmd_size scratch reg\n", __func__); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, + PCI_DMA_TODEVICE); return -1; } @@ -960,22 +1349,43 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) CPU_INTR_DOOR_BELL)) { dev_err(adapter->dev, "%s: failed to assert door-bell intr\n", __func__); + pci_unmap_single(card->dev, buf_pa, + MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE); return -1; } return 0; } -/* - * This function downloads commands to the device +/* This function init rx port in firmware which in turn enables to receive data + * from device before transmitting any packet. 
+ */ +static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask; + + /* Write the RX ring read pointer in to reg->rx_rdptr */ + if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | + tx_wrap)) { + dev_err(adapter->dev, + "RECV DATA: failed to write reg->rx_rdptr\n"); + return -1; + } + return 0; +} + +/* This function downloads commands to the device */ static int mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int ret = 0; - phys_addr_t *cmd_buf_pa; - phys_addr_t *cmdrsp_buf_pa; + dma_addr_t cmd_buf_pa, cmdrsp_buf_pa; + u8 *payload = (u8 *)skb->data; if (!(skb->data && skb->len)) { dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n", @@ -990,21 +1400,22 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) return -EBUSY; } - /* Make sure a command buffer is available */ - if (!card->cmd_buf) { - dev_err(adapter->dev, "Command buffer not available\n"); - return -EBUSY; - } + if (!mwifiex_pcie_ok_to_access_hw(adapter)) + mwifiex_pm_wakeup_card(adapter); adapter->cmd_sent = true; - /* Copy the given skb in to DMA accessable shared buffer */ - skb_put(card->cmd_buf, MWIFIEX_SIZE_OF_CMD_BUFFER - card->cmd_buf->len); - skb_trim(card->cmd_buf, skb->len); - memcpy(card->cmd_buf->data, skb->data, skb->len); + + *(__le16 *)&payload[0] = cpu_to_le16((u16)skb->len); + *(__le16 *)&payload[2] = cpu_to_le16(MWIFIEX_TYPE_CMD); + + if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) + return -1; + + card->cmd_buf = skb; /* To send a command, the driver will: 1. Write the 64bit physical address of the data buffer to - SCRATCH1 + SCRATCH0 + cmd response address low + cmd response address high 2. Ring the door bell (i.e. 
set the door bell interrupt) In response to door bell interrupt, the firmware will perform @@ -1013,11 +1424,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) */ if (card->cmdrsp_buf) { - cmdrsp_buf_pa = MWIFIEX_SKB_PACB(card->cmdrsp_buf); + MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa); /* Write the lower 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO, - (u32)*cmdrsp_buf_pa)) { + if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, + (u32)cmdrsp_buf_pa)) { dev_err(adapter->dev, "Failed to write download cmd to boot code.\n"); ret = -1; @@ -1025,8 +1436,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) } /* Write the upper 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI, - (u32)((u64)*cmdrsp_buf_pa >> 32))) { + if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, + (u32)((u64)cmdrsp_buf_pa >> 32))) { dev_err(adapter->dev, "Failed to write download cmd to boot code.\n"); ret = -1; @@ -1034,27 +1445,29 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) } } - cmd_buf_pa = MWIFIEX_SKB_PACB(card->cmd_buf); - /* Write the lower 32bits of the physical address to REG_CMD_ADDR_LO */ - if (mwifiex_write_reg(adapter, REG_CMD_ADDR_LO, (u32)*cmd_buf_pa)) { + MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa); + /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */ + if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, + (u32)cmd_buf_pa)) { dev_err(adapter->dev, "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } - /* Write the upper 32bits of the physical address to REG_CMD_ADDR_HI */ - if (mwifiex_write_reg(adapter, REG_CMD_ADDR_HI, - (u32)((u64)*cmd_buf_pa >> 32))) { + /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */ + if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, + (u32)((u64)cmd_buf_pa >> 32))) { dev_err(adapter->dev, "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } - /* Write the command length to REG_CMD_SIZE */ - if (mwifiex_write_reg(adapter, REG_CMD_SIZE, card->cmd_buf->len)) { + /* Write the command length to reg->cmd_size */ + if (mwifiex_write_reg(adapter, reg->cmd_size, + card->cmd_buf->len)) { dev_err(adapter->dev, - "Failed to write cmd len to REG_CMD_SIZE\n"); + "Failed to write cmd len to reg->cmd_size\n"); ret = -1; goto done; } @@ -1081,18 +1494,30 @@ done: static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct sk_buff *skb = card->cmdrsp_buf; int count = 0; + u16 rx_len; + __le16 pkt_len; + dma_addr_t buf_pa; dev_dbg(adapter->dev, "info: Rx CMD Response\n"); + MWIFIEX_SKB_PACB(skb, &buf_pa); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE); + + pkt_len = *((__le16 *)skb->data); + rx_len = le16_to_cpu(pkt_len); + skb_trim(skb, rx_len); + skb_pull(skb, INTF_HEADER_LEN); + if (!adapter->curr_cmd) { - skb_pull(skb, INTF_HEADER_LEN); if (adapter->ps_state == PS_STATE_SLEEP_CFM) { mwifiex_process_sleep_confirm_resp(adapter, skb->data, skb->len); - while (mwifiex_pcie_ok_to_access_hw(adapter) && - (count++ < 10)) + while (reg->sleep_cookie && (count++ < 10) && + mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); } else { dev_err(adapter->dev, @@ -1100,9 +1525,12 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter 
*adapter) } memcpy(adapter->upld_buf, skb->data, min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); - skb_push(skb, INTF_HEADER_LEN); + if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE)) + return -1; + + MWIFIEX_SKB_PACB(skb, &buf_pa); } else if (mwifiex_pcie_ok_to_access_hw(adapter)) { - skb_pull(skb, INTF_HEADER_LEN); adapter->curr_cmd->resp_skb = skb; adapter->cmd_resp_received = true; /* Take the pointer and set it to CMD node and will @@ -1112,14 +1540,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) /* Clear the cmd-rsp buffer address in scratch registers. This will prevent firmware from writing to the same response buffer again. */ - if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO, 0)) { + if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) { dev_err(adapter->dev, "cmd_done: failed to clear cmd_rsp_addr_lo\n"); return -1; } /* Write the upper 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI, 0)) { + if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) { dev_err(adapter->dev, "cmd_done: failed to clear cmd_rsp_addr_hi\n"); return -1; @@ -1136,10 +1564,23 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; + dma_addr_t buf_pa; + struct sk_buff *skb_tmp; if (skb) { card->cmdrsp_buf = skb; skb_push(card->cmdrsp_buf, INTF_HEADER_LEN); + if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE)) + return -1; + } + + skb_tmp = card->cmd_buf; + if (skb_tmp) { + MWIFIEX_SKB_PACB(skb_tmp, &buf_pa); + pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE); + card->cmd_buf = NULL; } return 0; @@ -1151,8 +1592,14 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter, static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; u32 wrptr, event; + dma_addr_t buf_pa; + struct mwifiex_evt_buf_desc *desc; + + if (!mwifiex_pcie_ok_to_access_hw(adapter)) + mwifiex_pm_wakeup_card(adapter); if (adapter->event_received) { dev_dbg(adapter->dev, "info: Event being processed, " @@ -1166,9 +1613,9 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) } /* Read the event ring write pointer set by firmware */ - if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) { + if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { dev_err(adapter->dev, - "EventReady: failed to read REG_EVTBD_WRPTR\n"); + "EventReady: failed to read reg->evt_wrptr\n"); return -1; } @@ -1176,20 +1623,23 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) card->evtbd_rdptr, wrptr); if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr & MWIFIEX_EVTBD_MASK)) || - ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) == - (card->evtbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) { + ((wrptr & reg->evt_rollover_ind) == + (card->evtbd_rdptr & reg->evt_rollover_ind))) { struct sk_buff *skb_cmd; __le16 data_len = 0; u16 evt_len; dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr); skb_cmd = card->evt_buf_list[rdptr]; + MWIFIEX_SKB_PACB(skb_cmd, &buf_pa); + pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE, + PCI_DMA_FROMDEVICE); + /* Take the pointer and set it to event pointer in adapter and will return back after event handling callback */ 
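As a side note on the pointer arithmetic used throughout the new tx/rx/event ring handling: the low bits of each pointer index the ring, and a dedicated rollover-indicator bit is toggled every time the index wraps, so firmware and driver can tell an empty ring from a full one when the indices are equal. A self-contained sketch follows; the mask and flag values are assumptions in the 8766 style, not taken from the header.

#include <stdio.h>
#include <stdint.h>

#define EX_MAX_BD        0x20u         /* ring depth, MWIFIEX_MAX_TXRX_BD-like */
#define EX_BD_MASK       0x3fu         /* assumed index mask                   */
#define EX_ROLLOVER_IND  (1u << 7)     /* assumed rollover indicator bit       */

/* Advance a buffer-descriptor pointer by one slot; on wrap, clear the index
 * and toggle the rollover indicator, as the driver does for its read pointers.
 */
static uint32_t ex_advance_bd_ptr(uint32_t ptr)
{
	if ((++ptr & EX_BD_MASK) == EX_MAX_BD)
		ptr = (ptr & EX_ROLLOVER_IND) ^ EX_ROLLOVER_IND;
	return ptr;
}

int main(void)
{
	uint32_t p = EX_ROLLOVER_IND;  /* read pointers start with the bit set */
	int i;

	for (i = 0; i < 2 * EX_MAX_BD; i++)
		p = ex_advance_bd_ptr(p);
	printf("after two laps: idx=%u rollover=%u\n",
	       p & EX_BD_MASK, !!(p & EX_ROLLOVER_IND));
	return 0;
}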
card->evt_buf_list[rdptr] = NULL; - card->evtbd_ring[rdptr]->paddr = 0; - card->evtbd_ring[rdptr]->len = 0; - card->evtbd_ring[rdptr]->flags = 0; + desc = card->evtbd_ring[rdptr]; + memset(desc, 0, sizeof(*desc)); event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN]; adapter->event_cause = event; @@ -1225,10 +1675,12 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int ret = 0; u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; u32 wrptr; - phys_addr_t *buf_pa; + dma_addr_t buf_pa; + struct mwifiex_evt_buf_desc *desc; if (!skb) return 0; @@ -1240,19 +1692,25 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, } /* Read the event ring write pointer set by firmware */ - if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) { + if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { dev_err(adapter->dev, - "event_complete: failed to read REG_EVTBD_WRPTR\n"); + "event_complete: failed to read reg->evt_wrptr\n"); return -1; } if (!card->evt_buf_list[rdptr]) { skb_push(skb, INTF_HEADER_LEN); + if (mwifiex_map_pci_memory(adapter, skb, + MAX_EVENT_SIZE, + PCI_DMA_FROMDEVICE)) + return -1; + MWIFIEX_SKB_PACB(skb, &buf_pa); card->evt_buf_list[rdptr] = skb; - buf_pa = MWIFIEX_SKB_PACB(skb); - card->evtbd_ring[rdptr]->paddr = *buf_pa; - card->evtbd_ring[rdptr]->len = (u16)skb->len; - card->evtbd_ring[rdptr]->flags = 0; + MWIFIEX_SKB_PACB(skb, &buf_pa); + desc = card->evtbd_ring[rdptr]; + desc->paddr = buf_pa; + desc->len = (u16)skb->len; + desc->flags = 0; skb = NULL; } else { dev_dbg(adapter->dev, @@ -1262,17 +1720,18 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) { card->evtbd_rdptr = ((card->evtbd_rdptr & - MWIFIEX_BD_FLAG_ROLLOVER_IND) ^ - MWIFIEX_BD_FLAG_ROLLOVER_IND); + reg->evt_rollover_ind) ^ + reg->evt_rollover_ind); } dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>", card->evtbd_rdptr, wrptr); - /* Write the event ring read pointer in to REG_EVTBD_RDPTR */ - if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) { + /* Write the event ring read pointer in to reg->evt_rdptr */ + if (mwifiex_write_reg(adapter, reg->evt_rdptr, + card->evtbd_rdptr)) { dev_err(adapter->dev, - "event_complete: failed to read REG_EVTBD_RDPTR\n"); + "event_complete: failed to read reg->evt_rdptr\n"); return -1; } @@ -1299,11 +1758,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, struct sk_buff *skb; u32 txlen, tx_blocks = 0, tries, len; u32 block_retry_cnt = 0; - - if (!adapter) { - pr_err("adapter structure is not valid\n"); - return -1; - } + dma_addr_t buf_pa; + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!firmware || !firmware_len) { dev_err(adapter->dev, @@ -1325,7 +1782,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = -ENOMEM; goto done; } - mwifiex_update_sk_buff_pa(skb); /* Perform firmware data transfer */ do { @@ -1336,7 +1792,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, break; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { - ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_2_REG, + ret = mwifiex_read_reg(adapter, reg->cmd_size, &len); if (ret) { dev_warn(adapter->dev, @@ -1382,16 +1838,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, 
dev_dbg(adapter->dev, "."); - tx_blocks = (txlen + - MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD - 1) / - MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD; + tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) / + card->pcie.blksz_fw_dl; /* Copy payload to buffer */ memmove(skb->data, &firmware[offset], txlen); } skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len); - skb_trim(skb, tx_blocks * MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD); + skb_trim(skb, tx_blocks * card->pcie.blksz_fw_dl); /* Send the boot command to device */ if (mwifiex_pcie_send_boot_cmd(adapter, skb)) { @@ -1400,6 +1855,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = -1; goto done; } + + MWIFIEX_SKB_PACB(skb, &buf_pa); + /* Wait for the command done interrupt */ do { if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS, @@ -1407,11 +1865,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, dev_err(adapter->dev, "%s: Failed to read " "interrupt status during fw dnld.\n", __func__); + pci_unmap_single(card->dev, buf_pa, skb->len, + PCI_DMA_TODEVICE); ret = -1; goto done; } } while ((ireg_intr & CPU_INTR_DOOR_BELL) == CPU_INTR_DOOR_BELL); + + pci_unmap_single(card->dev, buf_pa, skb->len, + PCI_DMA_TODEVICE); + offset += txlen; } while (true); @@ -1435,6 +1899,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { int ret = 0; u32 firmware_stat, winner_status; + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 tries; /* Mask spurios interrupts */ @@ -1445,7 +1911,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) } dev_dbg(adapter->dev, "Setting driver ready signature\n"); - if (mwifiex_write_reg(adapter, REG_DRV_READY, FIRMWARE_READY_PCIE)) { + if (mwifiex_write_reg(adapter, reg->drv_rdy, + FIRMWARE_READY_PCIE)) { dev_err(adapter->dev, "Failed to write driver ready signature\n"); return -1; @@ -1453,7 +1920,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { - if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG, + if (mwifiex_read_reg(adapter, reg->fw_status, &firmware_stat)) ret = -1; else @@ -1470,7 +1937,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) } if (ret) { - if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG, + if (mwifiex_read_reg(adapter, reg->fw_status, &winner_status)) ret = -1; else if (!winner_status) { @@ -1594,39 +2061,40 @@ exit: static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) { int ret; - u32 pcie_ireg = 0; + u32 pcie_ireg; unsigned long flags; spin_lock_irqsave(&adapter->int_lock, flags); /* Clear out unused interrupts */ - adapter->int_status &= HOST_INTR_MASK; + pcie_ireg = adapter->int_status; + adapter->int_status = 0; spin_unlock_irqrestore(&adapter->int_lock, flags); - while (adapter->int_status & HOST_INTR_MASK) { - if (adapter->int_status & HOST_INTR_DNLD_DONE) { - adapter->int_status &= ~HOST_INTR_DNLD_DONE; - if (adapter->data_sent) { - dev_dbg(adapter->dev, "info: DATA sent intr\n"); - adapter->data_sent = false; - } + while (pcie_ireg & HOST_INTR_MASK) { + if (pcie_ireg & HOST_INTR_DNLD_DONE) { + pcie_ireg &= ~HOST_INTR_DNLD_DONE; + dev_dbg(adapter->dev, "info: TX DNLD Done\n"); + ret = mwifiex_pcie_send_data_complete(adapter); + if (ret) + return ret; } - if (adapter->int_status & HOST_INTR_UPLD_RDY) { - adapter->int_status &= ~HOST_INTR_UPLD_RDY; + if (pcie_ireg & HOST_INTR_UPLD_RDY) { + pcie_ireg &= ~HOST_INTR_UPLD_RDY; 
dev_dbg(adapter->dev, "info: Rx DATA\n"); ret = mwifiex_pcie_process_recv_data(adapter); if (ret) return ret; } - if (adapter->int_status & HOST_INTR_EVENT_RDY) { - adapter->int_status &= ~HOST_INTR_EVENT_RDY; + if (pcie_ireg & HOST_INTR_EVENT_RDY) { + pcie_ireg &= ~HOST_INTR_EVENT_RDY; dev_dbg(adapter->dev, "info: Rx EVENT\n"); ret = mwifiex_pcie_process_event_ready(adapter); if (ret) return ret; } - if (adapter->int_status & HOST_INTR_CMD_DONE) { - adapter->int_status &= ~HOST_INTR_CMD_DONE; + if (pcie_ireg & HOST_INTR_CMD_DONE) { + pcie_ireg &= ~HOST_INTR_CMD_DONE; if (adapter->cmd_sent) { dev_dbg(adapter->dev, "info: CMD sent Interrupt\n"); @@ -1654,8 +2122,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) "Write register failed\n"); return -1; } - adapter->int_status |= pcie_ireg; - adapter->int_status &= HOST_INTR_MASK; } } @@ -1687,7 +2153,7 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type, } if (type == MWIFIEX_TYPE_DATA) - return mwifiex_pcie_send_data(adapter, skb); + return mwifiex_pcie_send_data(adapter, skb, tx_param); else if (type == MWIFIEX_TYPE_CMD) return mwifiex_pcie_send_cmd(adapter, skb); @@ -1709,6 +2175,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; int ret; struct pci_dev *pdev = card->dev; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; pci_set_drvdata(pdev, card); @@ -1739,6 +2206,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) card->pci_mmap = pci_iomap(pdev, 0, 0); if (!card->pci_mmap) { dev_err(adapter->dev, "iomap(0) error\n"); + ret = -EIO; goto err_iomap0; } ret = pci_request_region(pdev, 2, DRV_NAME); @@ -1749,6 +2217,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) card->pci_mmap1 = pci_iomap(pdev, 2, 0); if (!card->pci_mmap1) { dev_err(adapter->dev, "iomap(2) error\n"); + ret = -EIO; goto err_iomap2; } @@ -1769,10 +2238,13 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter); if (ret) goto err_alloc_cmdbuf; - ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter); - if (ret) - goto err_alloc_cookie; - + if (reg->sleep_cookie) { + ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter); + if (ret) + goto err_alloc_cookie; + } else { + card->sleep_cookie_vbase = NULL; + } return ret; err_alloc_cookie: @@ -1813,17 +2285,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - mwifiex_pcie_delete_sleep_cookie_buf(adapter); - mwifiex_pcie_delete_cmdrsp_buf(adapter); - mwifiex_pcie_delete_evtbd_ring(adapter); - mwifiex_pcie_delete_rxbd_ring(adapter); - mwifiex_pcie_delete_txbd_ring(adapter); - card->cmdrsp_buf = NULL; - - dev_dbg(adapter->dev, "Clearing driver ready signature\n"); if (user_rmmod) { - if (mwifiex_write_reg(adapter, REG_DRV_READY, 0x00000000)) + dev_dbg(adapter->dev, "Clearing driver ready signature\n"); + if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) dev_err(adapter->dev, "Failed to write driver not-ready signature\n"); } @@ -1861,7 +2327,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) } adapter->dev = &pdev->dev; - strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME); + strcpy(adapter->fw_name, card->pcie.firmware); return 0; } @@ -1875,10 +2341,21 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) static void 
mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg; if (card) { dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__); free_irq(card->dev->irq, card->dev); + + reg = card->pcie.reg; + if (reg->sleep_cookie) + mwifiex_pcie_delete_sleep_cookie_buf(adapter); + + mwifiex_pcie_delete_cmdrsp_buf(adapter); + mwifiex_pcie_delete_evtbd_ring(adapter); + mwifiex_pcie_delete_rxbd_ring(adapter); + mwifiex_pcie_delete_txbd_ring(adapter); + card->cmdrsp_buf = NULL; } } @@ -1900,6 +2377,8 @@ static struct mwifiex_if_ops pcie_ops = { .event_complete = mwifiex_pcie_event_complete, .update_mp_end_port = NULL, .cleanup_mpa_buf = NULL, + .init_fw_port = mwifiex_pcie_init_fw_port, + .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, }; /* @@ -1912,7 +2391,7 @@ static int mwifiex_pcie_init_module(void) { int ret; - pr_debug("Marvell 8766 PCIe Driver\n"); + pr_debug("Marvell PCIe Driver\n"); sema_init(&add_remove_card_sem, 1); @@ -1955,4 +2434,5 @@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); MODULE_VERSION(PCIE_VERSION); MODULE_LICENSE("GPL v2"); -MODULE_FIRMWARE("mrvl/pcie8766_uapsta.bin"); +MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME); +MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h index 2f218f9a3fd3..d322ab8604ea 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ b/drivers/net/wireless/mwifiex/pcie.h @@ -29,6 +29,11 @@ #include "main.h" #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" +#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin" + +#define PCIE_VENDOR_ID_MARVELL (0x11ab) +#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30) +#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38) /* Constants for Buffer Descriptor (BD) rings */ #define MWIFIEX_MAX_TXRX_BD 0x20 @@ -57,6 +62,8 @@ #define PCIE_SCRATCH_10_REG 0xCE8 #define PCIE_SCRATCH_11_REG 0xCEC #define PCIE_SCRATCH_12_REG 0xCF0 +#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C +#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C #define CPU_INTR_DNLD_RDY BIT(0) #define CPU_INTR_DOOR_BELL BIT(1) @@ -75,27 +82,14 @@ #define MWIFIEX_BD_FLAG_ROLLOVER_IND BIT(7) #define MWIFIEX_BD_FLAG_FIRST_DESC BIT(0) #define MWIFIEX_BD_FLAG_LAST_DESC BIT(1) -#define REG_CMD_ADDR_LO PCIE_SCRATCH_0_REG -#define REG_CMD_ADDR_HI PCIE_SCRATCH_1_REG -#define REG_CMD_SIZE PCIE_SCRATCH_2_REG - -#define REG_CMDRSP_ADDR_LO PCIE_SCRATCH_4_REG -#define REG_CMDRSP_ADDR_HI PCIE_SCRATCH_5_REG - -/* TX buffer description read pointer */ -#define REG_TXBD_RDPTR PCIE_SCRATCH_6_REG -/* TX buffer description write pointer */ -#define REG_TXBD_WRPTR PCIE_SCRATCH_7_REG -/* RX buffer description read pointer */ -#define REG_RXBD_RDPTR PCIE_SCRATCH_8_REG -/* RX buffer description write pointer */ -#define REG_RXBD_WRPTR PCIE_SCRATCH_9_REG -/* Event buffer description read pointer */ -#define REG_EVTBD_RDPTR PCIE_SCRATCH_10_REG -/* Event buffer description write pointer */ -#define REG_EVTBD_WRPTR PCIE_SCRATCH_11_REG -/* Driver ready signature write pointer */ -#define REG_DRV_READY PCIE_SCRATCH_12_REG +#define MWIFIEX_BD_FLAG_SOP BIT(0) +#define MWIFIEX_BD_FLAG_EOP BIT(1) +#define MWIFIEX_BD_FLAG_XS_SOP BIT(2) +#define MWIFIEX_BD_FLAG_XS_EOP BIT(3) +#define MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND BIT(7) +#define MWIFIEX_BD_FLAG_RX_ROLLOVER_IND BIT(10) +#define MWIFIEX_BD_FLAG_TX_START_PTR BIT(16) +#define MWIFIEX_BD_FLAG_TX_ROLLOVER_IND BIT(26) /* Max retry 
number of command write */ #define MAX_WRITE_IOMEM_RETRY 2 @@ -104,45 +98,223 @@ /* FW awake cookie after FW ready */ #define FW_AWAKE_COOKIE (0xAA55AA55) +struct mwifiex_pcie_card_reg { + u16 cmd_addr_lo; + u16 cmd_addr_hi; + u16 fw_status; + u16 cmd_size; + u16 cmdrsp_addr_lo; + u16 cmdrsp_addr_hi; + u16 tx_rdptr; + u16 tx_wrptr; + u16 rx_rdptr; + u16 rx_wrptr; + u16 evt_rdptr; + u16 evt_wrptr; + u16 drv_rdy; + u16 tx_start_ptr; + u32 tx_mask; + u32 tx_wrap_mask; + u32 rx_mask; + u32 rx_wrap_mask; + u32 tx_rollover_ind; + u32 rx_rollover_ind; + u32 evt_rollover_ind; + u8 ring_flag_sop; + u8 ring_flag_eop; + u8 ring_flag_xs_sop; + u8 ring_flag_xs_eop; + u32 ring_tx_start_ptr; + u8 pfu_enabled; + u8 sleep_cookie; +}; + +static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = { + .cmd_addr_lo = PCIE_SCRATCH_0_REG, + .cmd_addr_hi = PCIE_SCRATCH_1_REG, + .cmd_size = PCIE_SCRATCH_2_REG, + .fw_status = PCIE_SCRATCH_3_REG, + .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG, + .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG, + .tx_rdptr = PCIE_SCRATCH_6_REG, + .tx_wrptr = PCIE_SCRATCH_7_REG, + .rx_rdptr = PCIE_SCRATCH_8_REG, + .rx_wrptr = PCIE_SCRATCH_9_REG, + .evt_rdptr = PCIE_SCRATCH_10_REG, + .evt_wrptr = PCIE_SCRATCH_11_REG, + .drv_rdy = PCIE_SCRATCH_12_REG, + .tx_start_ptr = 0, + .tx_mask = MWIFIEX_TXBD_MASK, + .tx_wrap_mask = 0, + .rx_mask = MWIFIEX_RXBD_MASK, + .rx_wrap_mask = 0, + .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND, + .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND, + .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND, + .ring_flag_sop = 0, + .ring_flag_eop = 0, + .ring_flag_xs_sop = 0, + .ring_flag_xs_eop = 0, + .ring_tx_start_ptr = 0, + .pfu_enabled = 0, + .sleep_cookie = 1, +}; + +static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { + .cmd_addr_lo = PCIE_SCRATCH_0_REG, + .cmd_addr_hi = PCIE_SCRATCH_1_REG, + .cmd_size = PCIE_SCRATCH_2_REG, + .fw_status = PCIE_SCRATCH_3_REG, + .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG, + .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG, + .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1, + .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1, + .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1, + .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1, + .evt_rdptr = PCIE_SCRATCH_10_REG, + .evt_wrptr = PCIE_SCRATCH_11_REG, + .drv_rdy = PCIE_SCRATCH_12_REG, + .tx_start_ptr = 16, + .tx_mask = 0x03FF0000, + .tx_wrap_mask = 0x07FF0000, + .rx_mask = 0x000003FF, + .rx_wrap_mask = 0x000007FF, + .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND, + .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND, + .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND, + .ring_flag_sop = MWIFIEX_BD_FLAG_SOP, + .ring_flag_eop = MWIFIEX_BD_FLAG_EOP, + .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP, + .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP, + .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, + .pfu_enabled = 1, + .sleep_cookie = 0, +}; + +struct mwifiex_pcie_device { + const char *firmware; + const struct mwifiex_pcie_card_reg *reg; + u16 blksz_fw_dl; +}; + +static const struct mwifiex_pcie_device mwifiex_pcie8766 = { + .firmware = PCIE8766_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_8766, + .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, +}; + +static const struct mwifiex_pcie_device mwifiex_pcie8897 = { + .firmware = PCIE8897_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_8897, + .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, +}; + +struct mwifiex_evt_buf_desc { + u64 paddr; + u16 len; + u16 flags; +} __packed; + struct mwifiex_pcie_buf_desc { u64 paddr; u16 len; u16 flags; } __packed; +struct mwifiex_pfu_buf_desc { + u16 flags; + u16 offset; + u16 frag_len; + u16 len; 
+ u64 paddr; + u32 reserved; +} __packed; + struct pcie_service_card { struct pci_dev *dev; struct mwifiex_adapter *adapter; + struct mwifiex_pcie_device pcie; + u8 txbd_flush; u32 txbd_wrptr; u32 txbd_rdptr; u32 txbd_ring_size; u8 *txbd_ring_vbase; - phys_addr_t txbd_ring_pbase; - struct mwifiex_pcie_buf_desc *txbd_ring[MWIFIEX_MAX_TXRX_BD]; + dma_addr_t txbd_ring_pbase; + void *txbd_ring[MWIFIEX_MAX_TXRX_BD]; struct sk_buff *tx_buf_list[MWIFIEX_MAX_TXRX_BD]; u32 rxbd_wrptr; u32 rxbd_rdptr; u32 rxbd_ring_size; u8 *rxbd_ring_vbase; - phys_addr_t rxbd_ring_pbase; - struct mwifiex_pcie_buf_desc *rxbd_ring[MWIFIEX_MAX_TXRX_BD]; + dma_addr_t rxbd_ring_pbase; + void *rxbd_ring[MWIFIEX_MAX_TXRX_BD]; struct sk_buff *rx_buf_list[MWIFIEX_MAX_TXRX_BD]; u32 evtbd_wrptr; u32 evtbd_rdptr; u32 evtbd_ring_size; u8 *evtbd_ring_vbase; - phys_addr_t evtbd_ring_pbase; - struct mwifiex_pcie_buf_desc *evtbd_ring[MWIFIEX_MAX_EVT_BD]; + dma_addr_t evtbd_ring_pbase; + void *evtbd_ring[MWIFIEX_MAX_EVT_BD]; struct sk_buff *evt_buf_list[MWIFIEX_MAX_EVT_BD]; struct sk_buff *cmd_buf; struct sk_buff *cmdrsp_buf; - struct sk_buff *sleep_cookie; + u8 *sleep_cookie_vbase; + dma_addr_t sleep_cookie_pbase; void __iomem *pci_mmap; void __iomem *pci_mmap1; }; +static inline int +mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr) +{ + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + + switch (card->dev->device) { + case PCIE_DEVICE_ID_MARVELL_88W8766P: + if (((card->txbd_wrptr & reg->tx_mask) == + (rdptr & reg->tx_mask)) && + ((card->txbd_wrptr & reg->tx_rollover_ind) != + (rdptr & reg->tx_rollover_ind))) + return 1; + break; + case PCIE_DEVICE_ID_MARVELL_88W8897: + if (((card->txbd_wrptr & reg->tx_mask) == + (rdptr & reg->tx_mask)) && + ((card->txbd_wrptr & reg->tx_rollover_ind) == + (rdptr & reg->tx_rollover_ind))) + return 1; + break; + } + + return 0; +} + +static inline int +mwifiex_pcie_txbd_not_full(struct pcie_service_card *card) +{ + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + + switch (card->dev->device) { + case PCIE_DEVICE_ID_MARVELL_88W8766P: + if (((card->txbd_wrptr & reg->tx_mask) != + (card->txbd_rdptr & reg->tx_mask)) || + ((card->txbd_wrptr & reg->tx_rollover_ind) != + (card->txbd_rdptr & reg->tx_rollover_ind))) + return 1; + break; + case PCIE_DEVICE_ID_MARVELL_88W8897: + if (((card->txbd_wrptr & reg->tx_mask) != + (card->txbd_rdptr & reg->tx_mask)) || + ((card->txbd_wrptr & reg->tx_rollover_ind) == + (card->txbd_rdptr & reg->tx_rollover_ind))) + return 1; + break; + } + + return 0; +} #endif /* _MWIFIEX_PCIE_H */ diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 973a9d90e9ea..bb60c2754a97 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1250,6 +1250,23 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, sizeof(struct ieee_types_header) - bss_entry->beacon_buf); break; + case WLAN_EID_VHT_CAPABILITY: + bss_entry->disable_11ac = false; + bss_entry->bcn_vht_cap = + (void *)(current_ptr + + sizeof(struct ieee_types_header)); + bss_entry->vht_cap_offset = + (u16)((u8 *)bss_entry->bcn_vht_cap - + bss_entry->beacon_buf); + break; + case WLAN_EID_VHT_OPERATION: + bss_entry->bcn_vht_oper = + (void *)(current_ptr + + sizeof(struct ieee_types_header)); + bss_entry->vht_info_offset = + (u16)((u8 *)bss_entry->bcn_vht_oper - + bss_entry->beacon_buf); + break; case WLAN_EID_BSS_COEX_2040: bss_entry->bcn_bss_co_2040 = current_ptr + sizeof(struct ieee_types_header); @@ 
-1264,6 +1281,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, sizeof(struct ieee_types_header) - bss_entry->beacon_buf); break; + case WLAN_EID_OPMODE_NOTIF: + bss_entry->oper_mode = + (void *)(current_ptr + + sizeof(struct ieee_types_header)); + bss_entry->oper_mode_offset = + (u16)((u8 *)bss_entry->oper_mode - + bss_entry->beacon_buf); + break; default: break; } @@ -1309,7 +1334,6 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node; union mwifiex_scan_cmd_config_tlv *scan_cfg_out; struct mwifiex_ie_types_chan_list_param_set *chan_list_out; - u32 buf_size; struct mwifiex_chan_scan_param_set *scan_chan_list; u8 filtered_scan; u8 scan_current_chan_only; @@ -1332,18 +1356,16 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv), - GFP_KERNEL); + GFP_KERNEL); if (!scan_cfg_out) { - dev_err(adapter->dev, "failed to alloc scan_cfg_out\n"); ret = -ENOMEM; goto done; } - buf_size = sizeof(struct mwifiex_chan_scan_param_set) * - MWIFIEX_USER_SCAN_CHAN_MAX; - scan_chan_list = kzalloc(buf_size, GFP_KERNEL); + scan_chan_list = kcalloc(MWIFIEX_USER_SCAN_CHAN_MAX, + sizeof(struct mwifiex_chan_scan_param_set), + GFP_KERNEL); if (!scan_chan_list) { - dev_err(adapter->dev, "failed to alloc scan_chan_list\n"); kfree(scan_cfg_out); ret = -ENOMEM; goto done; @@ -1461,12 +1483,9 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv, unsigned long flags; /* Allocate and fill new bss descriptor */ - bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), - GFP_KERNEL); - if (!bss_desc) { - dev_err(priv->adapter->dev, " failed to alloc bss_desc\n"); + bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL); + if (!bss_desc) return -ENOMEM; - } ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc); if (ret) @@ -1485,20 +1504,26 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv, priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL; priv->curr_bss_params.bss_descriptor.wapi_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL; - priv->curr_bss_params.bss_descriptor.ht_cap_offset = - 0; + priv->curr_bss_params.bss_descriptor.ht_cap_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_ht_oper = NULL; - priv->curr_bss_params.bss_descriptor.ht_info_offset = - 0; - priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = - NULL; - priv->curr_bss_params.bss_descriptor. - bss_co_2040_offset = 0; + priv->curr_bss_params.bss_descriptor.ht_info_offset = 0; + priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = NULL; + priv->curr_bss_params.bss_descriptor.bss_co_2040_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL; priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0; priv->curr_bss_params.bss_descriptor.beacon_buf = NULL; - priv->curr_bss_params.bss_descriptor.beacon_buf_size = - 0; + priv->curr_bss_params.bss_descriptor.beacon_buf_size = 0; + priv->curr_bss_params.bss_descriptor.bcn_vht_cap = NULL; + priv->curr_bss_params.bss_descriptor.vht_cap_offset = 0; + priv->curr_bss_params.bss_descriptor.bcn_vht_oper = NULL; + priv->curr_bss_params.bss_descriptor.vht_info_offset = 0; + priv->curr_bss_params.bss_descriptor.oper_mode = NULL; + priv->curr_bss_params.bss_descriptor.oper_mode_offset = 0; + + /* Disable 11ac by default. 
Enable it only where there + * exist VHT_CAP IE in AP beacon + */ + priv->curr_bss_params.bss_descriptor.disable_11ac = true; /* Make a copy of current BSSID descriptor */ memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc, @@ -1747,7 +1772,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, .mac_address, ETH_ALEN)) mwifiex_update_curr_bss_params(priv, bss); - cfg80211_put_bss(bss); + cfg80211_put_bss(priv->wdev->wiphy, bss); } } else { dev_dbg(adapter->dev, "missing BSS channel IE\n"); @@ -1880,10 +1905,8 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv, } scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL); - if (!scan_cfg) { - dev_err(adapter->dev, "failed to alloc scan_cfg\n"); + if (!scan_cfg) return -ENOMEM; - } scan_cfg->ssid_list = req_ssid; scan_cfg->num_ssids = 1; @@ -1997,11 +2020,8 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) kfree(priv->curr_bcn_buf); priv->curr_bcn_buf = kmalloc(curr_bss->beacon_buf_size, GFP_ATOMIC); - if (!priv->curr_bcn_buf) { - dev_err(priv->adapter->dev, - "failed to alloc curr_bcn_buf\n"); + if (!priv->curr_bcn_buf) return; - } } memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf, @@ -2033,6 +2053,14 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) (curr_bss->beacon_buf + curr_bss->ht_info_offset); + if (curr_bss->bcn_vht_cap) + curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf + + curr_bss->vht_cap_offset); + + if (curr_bss->bcn_vht_oper) + curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf + + curr_bss->vht_info_offset); + if (curr_bss->bcn_bss_co_2040) curr_bss->bcn_bss_co_2040 = (curr_bss->beacon_buf + curr_bss->bss_co_2040_offset); @@ -2040,6 +2068,10 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) if (curr_bss->bcn_ext_cap) curr_bss->bcn_ext_cap = curr_bss->beacon_buf + curr_bss->ext_cap_offset; + + if (curr_bss->oper_mode) + curr_bss->oper_mode = (void *)(curr_bss->beacon_buf + + curr_bss->oper_mode_offset); } /* diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index f2874c3392b4..e63f646a260e 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -332,7 +332,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, u32 pkt_len, u32 port) { struct sdio_mmc_card *card = adapter->card; - int ret = -1; + int ret; u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE; u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; @@ -350,8 +350,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter, sdio_claim_host(card->func); - if (!sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size)) - ret = 0; + ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size); sdio_release_host(card->func); @@ -365,7 +364,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, u32 len, u32 port, u8 claim) { struct sdio_mmc_card *card = adapter->card; - int ret = -1; + int ret; u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE; u32 blk_size = (blk_mode == BLOCK_MODE) ? 
MWIFIEX_SDIO_BLOCK_SIZE : 1; @@ -376,8 +375,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, if (claim) sdio_claim_host(card->func); - if (!sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size)) - ret = 0; + ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size); if (claim) sdio_release_host(card->func); @@ -718,11 +716,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* Assume that the allocated buffer is 8-byte aligned */ fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL); - if (!fwbuf) { - dev_err(adapter->dev, - "unable to alloc buffer for FW. Terminating dnld\n"); + if (!fwbuf) return -ENOMEM; - } /* Perform firmware data transfer */ do { @@ -1520,7 +1515,6 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter, card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL); if (!card->mpa_tx.buf) { - dev_err(adapter->dev, "could not alloc buffer for MP-A TX\n"); ret = -1; goto error; } @@ -1529,7 +1523,6 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter, card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL); if (!card->mpa_rx.buf) { - dev_err(adapter->dev, "could not alloc buffer for MP-A RX\n"); ret = -1; goto error; } @@ -1682,10 +1675,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) /* Allocate buffers for SDIO MP-A */ card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL); - if (!card->mp_regs) { - dev_err(adapter->dev, "failed to alloc mp_regs\n"); + if (!card->mp_regs) return -ENOMEM; - } ret = mwifiex_alloc_sdio_mpa_buffers(adapter, SDIO_MP_TX_AGGR_DEF_BUF_SIZE, diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 5d87195390f8..c55c5bb93134 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -931,7 +931,6 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv, struct host_cmd_ds_pcie_details *host_spec = &cmd->params.pcie_host_spec; struct pcie_service_card *card = priv->adapter->card; - phys_addr_t *buf_pa; cmd->command = cpu_to_le16(HostCmd_CMD_PCIE_DESC_DETAILS); cmd->size = cpu_to_le16(sizeof(struct @@ -953,10 +952,11 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv, host_spec->evtbd_addr_lo = (u32)(card->evtbd_ring_pbase); host_spec->evtbd_addr_hi = (u32)(((u64)card->evtbd_ring_pbase)>>32); host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD; - if (card->sleep_cookie) { - buf_pa = MWIFIEX_SKB_PACB(card->sleep_cookie); - host_spec->sleep_cookie_addr_lo = (u32) *buf_pa; - host_spec->sleep_cookie_addr_hi = (u32) (((u64)*buf_pa) >> 32); + if (card->sleep_cookie_vbase) { + host_spec->sleep_cookie_addr_lo = + (u32)(card->sleep_cookie_pbase); + host_spec->sleep_cookie_addr_hi = + (u32)(((u64)(card->sleep_cookie_pbase)) >> 32); dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n", host_spec->sleep_cookie_addr_lo); } @@ -1230,7 +1230,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, data_buf); break; case HostCmd_CMD_11N_CFG: - ret = mwifiex_cmd_11n_cfg(cmd_ptr, cmd_action, data_buf); + ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf); break; case HostCmd_CMD_WMM_GET_STATUS: dev_dbg(priv->adapter->dev, diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 65c12eb3e5e7..4669f8d9389f 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -24,6 +24,7 @@ #include "main.h" #include "wmm.h" #include "11n.h" 
+#include "11ac.h" /* @@ -935,9 +936,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, / MWIFIEX_SDIO_BLOCK_SIZE) * MWIFIEX_SDIO_BLOCK_SIZE; adapter->curr_tx_buf_size = adapter->tx_buf_size; - dev_dbg(adapter->dev, - "cmd: max_tx_buf_size=%d, tx_buf_size=%d\n", - adapter->max_tx_buf_size, adapter->tx_buf_size); + dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n", + adapter->curr_tx_buf_size); if (adapter->if_ops.update_mp_end_port) adapter->if_ops.update_mp_end_port(adapter, diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index f542bb8ccbc8..9f33c92c90f5 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -162,13 +162,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, rcu_read_lock(); ies = rcu_dereference(bss->ies); - if (WARN_ON(!ies)) { - /* should never happen */ - rcu_read_unlock(); - return -EINVAL; - } beacon_ie = kmemdup(ies->data, ies->len, GFP_ATOMIC); beacon_ie_len = ies->len; + bss_desc->timestamp = ies->tsf; rcu_read_unlock(); if (!beacon_ie) { @@ -184,7 +180,6 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, bss_desc->cap_info_bitmap = bss->capability; bss_desc->bss_band = bss_priv->band; bss_desc->fw_tsf = bss_priv->fw_tsf; - bss_desc->timestamp = bss->tsf; if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) { dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; @@ -266,11 +261,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), - GFP_KERNEL); - if (!bss_desc) { - dev_err(priv->adapter->dev, " failed to alloc bss_desc\n"); + GFP_KERNEL); + if (!bss_desc) return -ENOMEM; - } ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc); if (ret) @@ -288,9 +281,10 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band) == HostCmd_SCAN_RADIO_TYPE_BG) - config_bands = BAND_B | BAND_G | BAND_GN; + config_bands = BAND_B | BAND_G | BAND_GN | + BAND_GAC; else - config_bands = BAND_A | BAND_AN; + config_bands = BAND_A | BAND_AN | BAND_AAC; if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands)) @@ -324,7 +318,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, } if (bss) - cfg80211_put_bss(bss); + cfg80211_put_bss(priv->adapter->wiphy, bss); } else { /* Adhoc mode */ /* If the requested SSID matches current SSID, return */ @@ -354,7 +348,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, " list. 
Joining...\n"); ret = mwifiex_adhoc_join(priv, bss_desc); if (bss) - cfg80211_put_bss(bss); + cfg80211_put_bss(priv->adapter->wiphy, bss); } else { dev_dbg(adapter->dev, "info: Network not found in " "the list, creating adhoc with ssid = %s\n", @@ -636,11 +630,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, } } buf = kzalloc(MWIFIEX_SIZE_OF_CMD_BUFFER, GFP_KERNEL); - if (!buf) { - dev_err(priv->adapter->dev, "%s: failed to alloc cmd buffer\n", - __func__); + if (!buf) return -ENOMEM; - } txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf; txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET); diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 8c80024c30ff..296faec14365 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -117,14 +117,16 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); break; case -1: - adapter->data_sent = false; + if (adapter->iface_type != MWIFIEX_PCIE) + adapter->data_sent = false; dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; case -EINPROGRESS: - adapter->data_sent = false; + if (adapter->iface_type != MWIFIEX_PCIE) + adapter->data_sent = false; break; case 0: mwifiex_write_data_complete(adapter, skb, 0, ret); diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 8dd72240f162..6e76a15a8950 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -219,6 +219,7 @@ void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config) config->rts_threshold = 0x7FFF; config->frag_threshold = 0x7FFF; config->retry_limit = 0x7F; + config->qos_info = 0xFF; } /* This function parses BSS related parameters from structure @@ -297,6 +298,38 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size) return; } +/* This function parses WMM related parameters from cfg80211_ap_settings + * structure and updates bss_config structure. + */ +void +mwifiex_set_wmm_params(struct mwifiex_private *priv, + struct mwifiex_uap_bss_param *bss_cfg, + struct cfg80211_ap_settings *params) +{ + const u8 *vendor_ie; + struct ieee_types_header *wmm_ie; + u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02}; + + vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WMM, + params->beacon.tail, + params->beacon.tail_len); + if (vendor_ie) { + wmm_ie = (struct ieee_types_header *)vendor_ie; + memcpy(&bss_cfg->wmm_info, wmm_ie + 1, + sizeof(bss_cfg->wmm_info)); + priv->wmm_enabled = 1; + } else { + memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info)); + memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui)); + bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE; + bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION; + priv->wmm_enabled = 0; + } + + bss_cfg->qos_info = 0x00; + return; +} /* This function parses BSS related parameters from structure * and prepares TLVs specific to WEP encryption. * These TLVs are appended to command buffer. 
@@ -354,6 +387,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) struct host_cmd_tlv_rates *tlv_rates; struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer; struct mwifiex_ie_types_htcap *htcap; + struct mwifiex_ie_types_wmmcap *wmm_cap; struct mwifiex_uap_bss_param *bss_cfg = cmd_buf; int i; u16 cmd_size = *param_size; @@ -507,6 +541,16 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) tlv += sizeof(struct mwifiex_ie_types_htcap); } + if (bss_cfg->wmm_info.qos_info != 0xFF) { + wmm_cap = (struct mwifiex_ie_types_wmmcap *)tlv; + wmm_cap->header.type = cpu_to_le16(WLAN_EID_VENDOR_SPECIFIC); + wmm_cap->header.len = cpu_to_le16(sizeof(wmm_cap->wmm_info)); + memcpy(&wmm_cap->wmm_info, &bss_cfg->wmm_info, + sizeof(wmm_cap->wmm_info)); + cmd_size += sizeof(struct mwifiex_ie_types_wmmcap); + tlv += sizeof(struct mwifiex_ie_types_wmmcap); + } + if (bss_cfg->sta_ao_timer) { ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv; ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER); diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 63ac9f2d11ae..f90fe21e5bfd 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -672,7 +672,7 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, *len, &actual_length, timeout); if (ret) { dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret); - ret = -1; + return ret; } *len = actual_length; @@ -691,7 +691,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, *len, &actual_length, timeout); if (ret) { dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret); - ret = -1; + return ret; } *len = actual_length; @@ -786,21 +786,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return 0; } -/* This function reads one block of firmware data. 
*/ -static int mwifiex_get_fw_data(struct mwifiex_adapter *adapter, - u32 offset, u32 len, u8 *buf) -{ - if (!buf || !len) - return -1; - - if (offset + len > adapter->firmware->size) - return -1; - - memcpy(buf, adapter->firmware->data + offset, len); - - return 0; -} - static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, struct mwifiex_fw_image *fw) { @@ -836,23 +821,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, dlen = 0; } else { /* copy the header of the fw_data to get the length */ - if (firmware) - memcpy(&fwdata->fw_hdr, &firmware[tlen], - sizeof(struct fw_header)); - else - mwifiex_get_fw_data(adapter, tlen, - sizeof(struct fw_header), - (u8 *)&fwdata->fw_hdr); + memcpy(&fwdata->fw_hdr, &firmware[tlen], + sizeof(struct fw_header)); dlen = le32_to_cpu(fwdata->fw_hdr.data_len); dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd); tlen += sizeof(struct fw_header); - if (firmware) - memcpy(fwdata->data, &firmware[tlen], dlen); - else - mwifiex_get_fw_data(adapter, tlen, dlen, - (u8 *)fwdata->data); + memcpy(fwdata->data, &firmware[tlen], dlen); fwdata->seq_num = cpu_to_le32(fw_seqnum); tlen += dlen; diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 0982375ba3b1..21553976b550 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -91,7 +91,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv, memcpy(info->packets_out, priv->wmm.packets_out, sizeof(priv->wmm.packets_out)); - info->max_tx_buf_size = (u32) adapter->max_tx_buf_size; + info->curr_tx_buf_size = (u32) adapter->curr_tx_buf_size; info->tx_buf_size = (u32) adapter->tx_buf_size; info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(priv, info->rx_tbl); diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h index f6d36b9654a0..cb2d0582bd36 100644 --- a/drivers/net/wireless/mwifiex/util.h +++ b/drivers/net/wireless/mwifiex/util.h @@ -22,16 +22,16 @@ static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb) { - return (struct mwifiex_rxinfo *)(skb->cb + sizeof(phys_addr_t)); + return (struct mwifiex_rxinfo *)(skb->cb + sizeof(dma_addr_t)); } static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb) { - return (struct mwifiex_txinfo *)(skb->cb + sizeof(phys_addr_t)); + return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t)); } -static inline phys_addr_t *MWIFIEX_SKB_PACB(struct sk_buff *skb) +static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa) { - return (phys_addr_t *)skb->cb; + memcpy(buf_pa, skb->cb, sizeof(dma_addr_t)); } #endif /* !_MWIFIEX_UTIL_H_ */ diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 818f871ae987..32adc878041d 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -109,12 +109,9 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra) struct mwifiex_ra_list_tbl *ra_list; ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC); - - if (!ra_list) { - dev_err(adapter->dev, "%s: failed to alloc ra_list\n", - __func__); + if (!ra_list) return NULL; - } + INIT_LIST_HEAD(&ra_list->list); skb_queue_head_init(&ra_list->skb_head); @@ -568,6 +565,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) mwifiex_wmm_delete_all_ralist(priv); memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); + if (priv->adapter->if_ops.clean_pcie_ring) + priv->adapter->if_ops.clean_pcie_ring(priv->adapter); 
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); } @@ -1206,13 +1205,15 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, ra_list_flags); break; case -1: - adapter->data_sent = false; + if (adapter->iface_type != MWIFIEX_PCIE) + adapter->data_sent = false; dev_err(adapter->dev, "host_to_card failed: %#x\n", ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; case -EINPROGRESS: - adapter->data_sent = false; + if (adapter->iface_type != MWIFIEX_PCIE) + adapter->data_sent = false; default: break; } diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index a00a03ea4ec9..091d9a64080a 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -101,6 +101,18 @@ MODULE_PARM_DESC(ap_mode_default, #define MWL8K_MAX_TX_QUEUES (MWL8K_TX_WMM_QUEUES + MWL8K_MAX_AMPDU_QUEUES) #define mwl8k_tx_queues(priv) (MWL8K_TX_WMM_QUEUES + (priv)->num_ampdu_queues) +/* txpriorities are mapped with hw queues. + * Each hw queue has a txpriority. + */ +#define TOTAL_HW_TX_QUEUES 8 + +/* Each HW queue can have one AMPDU stream. + * But, because one of the hw queue is reserved, + * maximum AMPDU queues that can be created are + * one short of total tx queues. + */ +#define MWL8K_NUM_AMPDU_STREAMS (TOTAL_HW_TX_QUEUES - 1) + struct rxd_ops { int rxd_size; void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr); @@ -160,7 +172,6 @@ struct mwl8k_ampdu_stream { u8 tid; u8 state; u8 idx; - u8 txq_idx; /* index of this stream in priv->txq */ }; struct mwl8k_priv { @@ -202,6 +213,8 @@ struct mwl8k_priv { int fw_mutex_depth; struct completion *hostcmd_wait; + atomic_t watchdog_event_pending; + /* lock held over TX and TX reap */ spinlock_t tx_lock; @@ -272,6 +285,9 @@ struct mwl8k_priv { char *fw_pref; char *fw_alt; struct completion firmware_loading_complete; + + /* bitmap of running BSSes */ + u32 running_bsses; }; #define MAX_WEP_KEY_LEN 13 @@ -1133,7 +1149,6 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL); if (rxq->buf == NULL) { - wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n"); pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); return -ENOMEM; } @@ -1426,7 +1441,6 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { - wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n"); pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); return -ENOMEM; } @@ -1516,6 +1530,9 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) return -EBUSY; } + if (atomic_read(&priv->watchdog_event_pending)) + return 0; + /* * The TX queues are stopped at this point, so this test * doesn't need to take ->tx_lock. 
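[Editor's illustration -- not part of the patch] The mwl8k changes around this point track live interfaces in a per-MAC-ID running_bsses bitmap and temporarily stop every running BSS around firmware commands that change radio-wide state, restarting them afterwards. A minimal standalone model of that bitmap handling, with illustrative names only (the real driver issues MWL8K_CMD_BSS_START per vif instead of touching the bitmap directly):

#include <stdio.h>
#include <stdint.h>

static uint32_t running_bsses;	/* one bit per MAC ID, as in the patch */

static void bss_start(unsigned int macid, int enable)
{
	if (enable)
		running_bsses |= 1u << macid;
	else
		running_bsses &= ~(1u << macid);
}

static void enable_bsses(int enable, uint32_t bitmap)
{
	for (unsigned int macid = 0; macid < 32; macid++)
		if (bitmap & (1u << macid))
			bss_start(macid, enable);
}

int main(void)
{
	bss_start(0, 1);
	bss_start(3, 1);

	/* around a command that alters radio-wide state */
	uint32_t snapshot = running_bsses;

	enable_bsses(0, snapshot);	/* stop all running BSSes */
	printf("during command: %#x\n", (unsigned int)running_bsses);
	enable_bsses(1, snapshot);	/* bring them back afterwards */
	printf("after command:  %#x\n", (unsigned int)running_bsses);
	return 0;
}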
@@ -1537,6 +1554,14 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) spin_unlock_bh(&priv->tx_lock); timeout = wait_for_completion_timeout(&tx_wait, msecs_to_jiffies(MWL8K_TX_WAIT_TIMEOUT_MS)); + + if (atomic_read(&priv->watchdog_event_pending)) { + spin_lock_bh(&priv->tx_lock); + priv->tx_wait = NULL; + spin_unlock_bh(&priv->tx_lock); + return 0; + } + spin_lock_bh(&priv->tx_lock); if (timeout) { @@ -1564,6 +1589,7 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) rc = -ETIMEDOUT; } + priv->tx_wait = NULL; spin_unlock_bh(&priv->tx_lock); return rc; @@ -1734,14 +1760,13 @@ mwl8k_add_stream(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 tid) struct mwl8k_priv *priv = hw->priv; int i; - for (i = 0; i < priv->num_ampdu_queues; i++) { + for (i = 0; i < MWL8K_NUM_AMPDU_STREAMS; i++) { stream = &priv->ampdu[i]; if (stream->state == AMPDU_NO_STREAM) { stream->sta = sta; stream->state = AMPDU_STREAM_NEW; stream->tid = tid; stream->idx = i; - stream->txq_idx = MWL8K_TX_WMM_QUEUES + i; wiphy_debug(hw->wiphy, "Added a new stream for %pM %d", sta->addr, tid); return stream; @@ -1782,7 +1807,7 @@ mwl8k_lookup_stream(struct ieee80211_hw *hw, u8 *addr, u8 tid) struct mwl8k_priv *priv = hw->priv; int i; - for (i = 0 ; i < priv->num_ampdu_queues; i++) { + for (i = 0; i < MWL8K_NUM_AMPDU_STREAMS; i++) { struct mwl8k_ampdu_stream *stream; stream = &priv->ampdu[i]; if (stream->state == AMPDU_NO_STREAM) @@ -1829,6 +1854,13 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid) tx_stats->pkts++; } +/* The hardware ampdu queues start from 5. + * txpriorities for ampdu queues are + * 5 6 7 0 1 2 3 4 ie., queue 5 is highest + * and queue 3 is lowest (queue 4 is reserved) + */ +#define BA_QUEUE 5 + static void mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, @@ -1928,8 +1960,13 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, stream = mwl8k_lookup_stream(hw, sta->addr, tid); if (stream != NULL) { if (stream->state == AMPDU_STREAM_ACTIVE) { - txpriority = stream->txq_idx; - index = stream->txq_idx; + WARN_ON(!(qos & MWL8K_QOS_ACK_POLICY_BLOCKACK)); + txpriority = (BA_QUEUE + stream->idx) % + TOTAL_HW_TX_QUEUES; + if (stream->idx <= 1) + index = stream->idx + + MWL8K_TX_WMM_QUEUES; + } else if (stream->state == AMPDU_STREAM_NEW) { /* We get here if the driver sends us packets * after we've initiated a stream, but before @@ -1971,6 +2008,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, } } spin_unlock(&priv->stream_lock); + } else { + qos &= ~MWL8K_QOS_ACK_POLICY_MASK; + qos |= MWL8K_QOS_ACK_POLICY_NORMAL; } dma = pci_map_single(priv->pdev, skb->data, @@ -2117,6 +2157,8 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw) } } +static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable, + u32 bitmap); /* * Command processing. @@ -2135,6 +2177,34 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) int rc; unsigned long timeout = 0; u8 buf[32]; + u32 bitmap = 0; + + wiphy_dbg(hw->wiphy, "Posting %s [%d]\n", + mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), cmd->macid); + + /* Before posting firmware commands that could change the hardware + * characteristics, make sure that all BSSes are stopped temporary. 
+ * Enable these stopped BSSes after completion of the commands + */ + + rc = mwl8k_fw_lock(hw); + if (rc) + return rc; + + if (priv->ap_fw && priv->running_bsses) { + switch (le16_to_cpu(cmd->code)) { + case MWL8K_CMD_SET_RF_CHANNEL: + case MWL8K_CMD_RADIO_CONTROL: + case MWL8K_CMD_RF_TX_POWER: + case MWL8K_CMD_TX_POWER: + case MWL8K_CMD_RF_ANTENNA: + case MWL8K_CMD_RTS_THRESHOLD: + case MWL8K_CMD_MIMO_CONFIG: + bitmap = priv->running_bsses; + mwl8k_enable_bsses(hw, false, bitmap); + break; + } + } cmd->result = (__force __le16) 0xffff; dma_size = le16_to_cpu(cmd->length); @@ -2143,13 +2213,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) if (pci_dma_mapping_error(priv->pdev, dma_addr)) return -ENOMEM; - rc = mwl8k_fw_lock(hw); - if (rc) { - pci_unmap_single(priv->pdev, dma_addr, dma_size, - PCI_DMA_BIDIRECTIONAL); - return rc; - } - priv->hostcmd_wait = &cmd_wait; iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR); iowrite32(MWL8K_H2A_INT_DOORBELL, @@ -2162,7 +2225,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) priv->hostcmd_wait = NULL; - mwl8k_fw_unlock(hw); pci_unmap_single(priv->pdev, dma_addr, dma_size, PCI_DMA_BIDIRECTIONAL); @@ -2189,6 +2251,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) ms); } + if (bitmap) + mwl8k_enable_bsses(hw, true, bitmap); + + mwl8k_fw_unlock(hw); + return rc; } @@ -2450,7 +2517,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) priv->hw_rev = cmd->hw_rev; mwl8k_set_caps(hw, le32_to_cpu(cmd->caps)); priv->ap_macids_supported = 0x000000ff; - priv->sta_macids_supported = 0x00000000; + priv->sta_macids_supported = 0x00000100; priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues); if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) { wiphy_warn(hw->wiphy, "fw reported %d ampdu queues" @@ -3469,7 +3536,10 @@ static int mwl8k_cmd_update_mac_addr(struct ieee80211_hw *hw, mac_type = MWL8K_MAC_TYPE_PRIMARY_AP; if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) { if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported)) - mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT; + if (priv->ap_fw) + mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT; + else + mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT; else mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT; } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) { @@ -3578,7 +3648,11 @@ static int mwl8k_cmd_get_watchdog_bitmap(struct ieee80211_hw *hw, u8 *bitmap) return rc; } -#define INVALID_BA 0xAA +#define MWL8K_WMM_QUEUE_NUMBER 3 + +static void mwl8k_destroy_ba(struct ieee80211_hw *hw, + u8 idx); + static void mwl8k_watchdog_ba_events(struct work_struct *work) { int rc; @@ -3586,24 +3660,41 @@ static void mwl8k_watchdog_ba_events(struct work_struct *work) struct mwl8k_ampdu_stream *streams; struct mwl8k_priv *priv = container_of(work, struct mwl8k_priv, watchdog_ba_handle); + struct ieee80211_hw *hw = priv->hw; + int i; + u32 status = 0; + + mwl8k_fw_lock(hw); rc = mwl8k_cmd_get_watchdog_bitmap(priv->hw, &bitmap); if (rc) - return; + goto done; - if (bitmap == INVALID_BA) - return; + spin_lock(&priv->stream_lock); /* the bitmap is the hw queue number. Map it to the ampdu queue. 
*/ - stream_index = bitmap - MWL8K_TX_WMM_QUEUES; - - BUG_ON(stream_index >= priv->num_ampdu_queues); - - streams = &priv->ampdu[stream_index]; - - if (streams->state == AMPDU_STREAM_ACTIVE) - ieee80211_stop_tx_ba_session(streams->sta, streams->tid); + for (i = 0; i < TOTAL_HW_TX_QUEUES; i++) { + if (bitmap & (1 << i)) { + stream_index = (i + MWL8K_WMM_QUEUE_NUMBER) % + TOTAL_HW_TX_QUEUES; + streams = &priv->ampdu[stream_index]; + if (streams->state == AMPDU_STREAM_ACTIVE) { + ieee80211_stop_tx_ba_session(streams->sta, + streams->tid); + spin_unlock(&priv->stream_lock); + mwl8k_destroy_ba(hw, stream_index); + spin_lock(&priv->stream_lock); + } + } + } + spin_unlock(&priv->stream_lock); +done: + atomic_dec(&priv->watchdog_event_pending); + status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); + iowrite32((status | MWL8K_A2H_INT_BA_WATCHDOG), + priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); + mwl8k_fw_unlock(hw); return; } @@ -3620,8 +3711,16 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int enable) { struct mwl8k_cmd_bss_start *cmd; + struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + struct mwl8k_priv *priv = hw->priv; int rc; + if (enable && (priv->running_bsses & (1 << mwl8k_vif->macid))) + return 0; + + if (!enable && !(priv->running_bsses & (1 << mwl8k_vif->macid))) + return 0; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; @@ -3633,9 +3732,31 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw, rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); kfree(cmd); + if (!rc) { + if (enable) + priv->running_bsses |= (1 << mwl8k_vif->macid); + else + priv->running_bsses &= ~(1 << mwl8k_vif->macid); + } return rc; } +static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable, u32 bitmap) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_vif *mwl8k_vif, *tmp_vif; + struct ieee80211_vif *vif; + + list_for_each_entry_safe(mwl8k_vif, tmp_vif, &priv->vif_list, list) { + vif = mwl8k_vif->vif; + + if (!(bitmap & (1 << mwl8k_vif->macid))) + continue; + + if (vif->type == NL80211_IFTYPE_AP) + mwl8k_cmd_bss_start(hw, vif, enable); + } +} /* * CMD_BASTREAM. 
*/ @@ -3763,7 +3884,7 @@ mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream, } static void mwl8k_destroy_ba(struct ieee80211_hw *hw, - struct mwl8k_ampdu_stream *stream) + u8 idx) { struct mwl8k_cmd_bastream *cmd; @@ -3775,10 +3896,10 @@ static void mwl8k_destroy_ba(struct ieee80211_hw *hw, cmd->header.length = cpu_to_le16(sizeof(*cmd)); cmd->action = cpu_to_le32(MWL8K_BA_DESTROY); - cmd->destroy_params.ba_context = cpu_to_le32(stream->idx); + cmd->destroy_params.ba_context = cpu_to_le32(idx); mwl8k_post_cmd(hw, &cmd->header); - wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", stream->idx); + wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", idx); kfree(cmd); } @@ -3875,7 +3996,30 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 *addr) { struct mwl8k_cmd_set_new_stn *cmd; - int rc; + struct mwl8k_priv *priv = hw->priv; + int rc, i; + u8 idx; + + spin_lock(&priv->stream_lock); + /* Destroy any active ampdu streams for this sta */ + for (i = 0; i < MWL8K_NUM_AMPDU_STREAMS; i++) { + struct mwl8k_ampdu_stream *s; + s = &priv->ampdu[i]; + if (s->state != AMPDU_NO_STREAM) { + if (memcmp(s->sta->addr, addr, ETH_ALEN) == 0) { + if (s->state == AMPDU_STREAM_ACTIVE) { + idx = s->idx; + spin_unlock(&priv->stream_lock); + mwl8k_destroy_ba(hw, idx); + spin_lock(&priv->stream_lock); + } else if (s->state == AMPDU_STREAM_NEW) { + mwl8k_remove_stream(hw, s); + } + } + } + } + + spin_unlock(&priv->stream_lock); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) @@ -4119,8 +4263,9 @@ static int mwl8k_set_key(struct ieee80211_hw *hw, u8 encr_type; u8 *addr; struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + struct mwl8k_priv *priv = hw->priv; - if (vif->type == NL80211_IFTYPE_STATION) + if (vif->type == NL80211_IFTYPE_STATION && !priv->ap_fw) return -EOPNOTSUPP; if (sta == NULL) @@ -4303,6 +4448,10 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id) } if (status & MWL8K_A2H_INT_BA_WATCHDOG) { + iowrite32(~MWL8K_A2H_INT_BA_WATCHDOG, + priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); + + atomic_inc(&priv->watchdog_event_pending); status &= ~MWL8K_A2H_INT_BA_WATCHDOG; ieee80211_queue_work(hw, &priv->watchdog_ba_handle); } @@ -4446,6 +4595,8 @@ static int mwl8k_start(struct ieee80211_hw *hw) priv->irq = -1; tasklet_disable(&priv->poll_tx_task); tasklet_disable(&priv->poll_rx_task); + } else { + ieee80211_wake_queues(hw); } return rc; @@ -4520,12 +4671,18 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: if (priv->ap_fw && di->fw_image_sta) { - /* we must load the sta fw to meet this request */ - if (!list_empty(&priv->vif_list)) - return -EBUSY; - rc = mwl8k_reload_firmware(hw, di->fw_image_sta); - if (rc) - return rc; + if (!list_empty(&priv->vif_list)) { + wiphy_warn(hw->wiphy, "AP interface is running.\n" + "Adding STA interface for WDS"); + } else { + /* we must load the sta fw to + * meet this request. + */ + rc = mwl8k_reload_firmware(hw, + di->fw_image_sta); + if (rc) + return rc; + } } macids_supported = priv->sta_macids_supported; break; @@ -4549,7 +4706,7 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw, /* Set the mac address. 
*/ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr); - if (priv->ap_fw) + if (vif->type == NL80211_IFTYPE_AP) mwl8k_cmd_set_new_stn_add_self(hw, vif); priv->macids_used |= 1 << mwl8k_vif->macid; @@ -4574,7 +4731,7 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw, struct mwl8k_priv *priv = hw->priv; struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); - if (priv->ap_fw) + if (vif->type == NL80211_IFTYPE_AP) mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr); mwl8k_cmd_del_mac_addr(hw, vif, vif->addr); @@ -4648,9 +4805,11 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) if (rc) goto out; - rc = mwl8k_cmd_set_rf_channel(hw, conf); - if (rc) - goto out; + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + rc = mwl8k_cmd_set_rf_channel(hw, conf); + if (rc) + goto out; + } if (conf->power_level > 18) conf->power_level = 18; @@ -4663,12 +4822,6 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) goto out; } - rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3); - if (rc) - wiphy_warn(hw->wiphy, "failed to set # of RX antennas"); - rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); - if (rc) - wiphy_warn(hw->wiphy, "failed to set # of TX antennas"); } else { rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level); @@ -4726,7 +4879,8 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rcu_read_unlock(); } - if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) { + if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc && + !priv->ap_fw) { rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates); if (rc) goto out; @@ -4734,6 +4888,25 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rc = mwl8k_cmd_use_fixed_rate_sta(hw); if (rc) goto out; + } else { + if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc && + priv->ap_fw) { + int idx; + int rate; + + /* Use AP firmware specific rate command. 
+ */ + idx = ffs(vif->bss_conf.basic_rates); + if (idx) + idx--; + + if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) + rate = mwl8k_rates_24[idx].hw_value; + else + rate = mwl8k_rates_50[idx].hw_value; + + mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate); + } } if (changed & BSS_CHANGED_ERP_PREAMBLE) { @@ -4743,13 +4916,13 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto out; } - if (changed & BSS_CHANGED_ERP_SLOT) { + if ((changed & BSS_CHANGED_ERP_SLOT) && !priv->ap_fw) { rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot); if (rc) goto out; } - if (vif->bss_conf.assoc && + if (vif->bss_conf.assoc && !priv->ap_fw && (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT))) { rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates); @@ -4829,11 +5002,9 @@ static void mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) { - struct mwl8k_priv *priv = hw->priv; - - if (!priv->ap_fw) + if (vif->type == NL80211_IFTYPE_STATION) mwl8k_bss_info_changed_sta(hw, vif, info, changed); - else + if (vif->type == NL80211_IFTYPE_AP) mwl8k_bss_info_changed_ap(hw, vif, info, changed); } @@ -5094,7 +5265,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int i, rc = 0; struct mwl8k_priv *priv = hw->priv; struct mwl8k_ampdu_stream *stream; - u8 *addr = sta->addr; + u8 *addr = sta->addr, idx; struct mwl8k_sta *sta_info = MWL8K_STA(sta); if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION)) @@ -5172,11 +5343,14 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: if (stream) { if (stream->state == AMPDU_STREAM_ACTIVE) { + idx = stream->idx; spin_unlock(&priv->stream_lock); - mwl8k_destroy_ba(hw, stream); + mwl8k_destroy_ba(hw, idx); spin_lock(&priv->stream_lock); } mwl8k_remove_stream(hw, stream); @@ -5192,8 +5366,9 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (!rc) stream->state = AMPDU_STREAM_ACTIVE; else { + idx = stream->idx; spin_unlock(&priv->stream_lock); - mwl8k_destroy_ba(hw, stream); + mwl8k_destroy_ba(hw, idx); spin_lock(&priv->stream_lock); wiphy_debug(hw->wiphy, "Failed adding stream for sta %pM tid %d\n", @@ -5256,7 +5431,7 @@ enum { MWL8366, }; -#define MWL8K_8366_AP_FW_API 2 +#define MWL8K_8366_AP_FW_API 3 #define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw" #define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api) @@ -5296,6 +5471,8 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, }, { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, }, + { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, }, + { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, }, { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, }, { }, }; @@ -5464,6 +5641,7 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw) if (priv->rxd_ops == NULL) { wiphy_err(hw->wiphy, "Driver does not have AP firmware image support for this hardware\n"); + rc = -ENOENT; goto err_stop_firmware; } } else { @@ -5473,6 +5651,7 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw) priv->sniffer_enabled = false; priv->wmm_enabled = false; priv->pending_tx_pkts = 0; + atomic_set(&priv->watchdog_event_pending, 
0); rc = mwl8k_rxq_init(hw, 0); if (rc) @@ -5552,6 +5731,15 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw) goto err_free_irq; } + /* Configure Antennas */ + rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3); + if (rc) + wiphy_warn(hw->wiphy, "failed to set # of RX antennas"); + rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); + if (rc) + wiphy_warn(hw->wiphy, "failed to set # of TX antennas"); + + /* Disable interrupts */ iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); free_irq(priv->pdev->irq, hw); @@ -5639,6 +5827,7 @@ fail: static const struct ieee80211_iface_limit ap_if_limits[] = { { .max = 8, .types = BIT(NL80211_IFTYPE_AP) }, + { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) }, }; static const struct ieee80211_iface_combination ap_if_comb = { @@ -5731,6 +5920,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) if (priv->ap_macids_supported || priv->device_info->fw_image_ap) { hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP); + hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION); hw->wiphy->iface_combinations = &ap_if_comb; hw->wiphy->n_iface_combinations = 1; } @@ -5809,6 +5999,7 @@ static int mwl8k_probe(struct pci_dev *pdev, priv->sram = pci_iomap(pdev, 0, 0x10000); if (priv->sram == NULL) { wiphy_err(hw->wiphy, "Cannot map device SRAM\n"); + rc = -EIO; goto err_iounmap; } @@ -5821,6 +6012,7 @@ static int mwl8k_probe(struct pci_dev *pdev, priv->regs = pci_iomap(pdev, 2, 0x10000); if (priv->regs == NULL) { wiphy_err(hw->wiphy, "Cannot map device registers\n"); + rc = -EIO; goto err_iounmap; } } @@ -5851,6 +6043,8 @@ static int mwl8k_probe(struct pci_dev *pdev, priv->hw_restart_in_progress = false; + priv->running_bsses = 0; + return rc; err_stop_firmware: diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index 88e3ad2d1db8..38ec8d19ac29 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -853,12 +853,8 @@ void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw) int err; desc = kmalloc(sizeof(*desc), GFP_ATOMIC); - if (!desc) { - printk(KERN_WARNING - "%s: Can't allocate space for RX descriptor\n", - dev->name); + if (!desc) goto update_stats; - } rxfid = hermes_read_regn(hw, RXFID); @@ -1336,10 +1332,9 @@ static void qbuf_scan(struct orinoco_private *priv, void *buf, unsigned long flags; sd = kmalloc(sizeof(*sd), GFP_ATOMIC); - if (!sd) { - printk(KERN_ERR "%s: failed to alloc memory\n", __func__); + if (!sd) return; - } + sd->buf = buf; sd->len = len; sd->type = type; @@ -1357,10 +1352,9 @@ static void qabort_scan(struct orinoco_private *priv) unsigned long flags; sd = kmalloc(sizeof(*sd), GFP_ATOMIC); - if (!sd) { - printk(KERN_ERR "%s: failed to alloc memory\n", __func__); + if (!sd) return; - } + sd->len = -1; /* Abort */ spin_lock_irqsave(&priv->scan_lock, flags); @@ -2290,7 +2284,6 @@ int orinoco_if_add(struct orinoco_private *priv, netif_carrier_off(dev); memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); - memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN); dev->base_addr = base_addr; dev->irq = irq; diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 01624dcaf73e..7744f42de1ea 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -804,10 +804,15 @@ static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset) static int ezusb_firmware_download(struct ezusb_priv *upriv, struct ez_usb_fw *fw) { - u8 
fw_buffer[FW_BUF_SIZE]; + u8 *fw_buffer; int retval, addr; int variant_offset; + fw_buffer = kmalloc(FW_BUF_SIZE, GFP_KERNEL); + if (!fw_buffer) { + printk(KERN_ERR PFX "Out of memory for firmware buffer.\n"); + return -ENOMEM; + } /* * This byte is 1 and should be replaced with 0. The offset is * 0x10AD in version 0.0.6. The byte in question should follow @@ -859,6 +864,7 @@ static int ezusb_firmware_download(struct ezusb_priv *upriv, printk(KERN_ERR PFX "Firmware download failed, error %d\n", retval); exit: + kfree(fw_buffer); return retval; } @@ -1681,7 +1687,8 @@ static int ezusb_probe(struct usb_interface *interface, firmware.code = fw_entry->data; } if (firmware.size && firmware.code) { - ezusb_firmware_download(upriv, &firmware); + if (ezusb_firmware_download(upriv, &firmware)) + goto error; } else { err("No firmware to download"); goto error; diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c index 96e39edfec77..e8c5714bfd11 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ b/drivers/net/wireless/orinoco/scan.c @@ -125,7 +125,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp, capability, beacon_interval, ie_buf, ie_len, signal, GFP_KERNEL); - cfg80211_put_bss(cbss); + cfg80211_put_bss(wiphy, cbss); } void orinoco_add_extscan_result(struct orinoco_private *priv, @@ -158,7 +158,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp, capability, beacon_interval, ie, ie_len, signal, GFP_KERNEL); - cfg80211_put_bss(cbss); + cfg80211_put_bss(wiphy, cbss); } void orinoco_add_hostscan_results(struct orinoco_private *priv, diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 933e5d941937..57e3af8ebb4b 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -559,6 +559,7 @@ static int p54p_probe(struct pci_dev *pdev, mem_len = pci_resource_len(pdev, 0); if (mem_len < sizeof(struct p54p_csr)) { dev_err(&pdev->dev, "Too short PCI resources\n"); + err = -ENODEV; goto err_disable_dev; } @@ -568,8 +569,10 @@ static int p54p_probe(struct pci_dev *pdev, goto err_disable_dev; } - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto err_free_reg; } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 800a16526c8e..b9deef66cf4b 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = { {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ - {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ @@ -510,11 +510,8 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) return err; tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL); - if 
(!buf) { - dev_err(&priv->udev->dev, "(p54usb) cannot allocate firmware" - "upload buffer!\n"); + if (!buf) return -ENOMEM; - } left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size); strcpy(buf, p54u_firmware_upload_3887); @@ -637,11 +634,8 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev) const u8 *data; buf = kmalloc(512, GFP_KERNEL); - if (!buf) { - dev_err(&priv->udev->dev, "(p54usb) firmware buffer " - "alloc failed!\n"); + if (!buf) return -ENOMEM; - } #define P54U_WRITE(type, addr, data) \ do {\ diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 4e44b1af119a..1c22b81e6ef3 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -1503,6 +1503,7 @@ static int prism54_get_auth(struct net_device *ndev, case DOT11_AUTH_BOTH: case DOT11_AUTH_SK: param->value = IW_AUTH_ALG_SHARED_KEY; + break; case DOT11_AUTH_NONE: default: param->value = 0; diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c index c5404cb59e08..9f19cceab487 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.c +++ b/drivers/net/wireless/prism54/islpci_mgt.c @@ -123,11 +123,8 @@ islpci_mgmt_rx_fill(struct net_device *ndev) if (buf->mem == NULL) { buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC); - if (!buf->mem) { - printk(KERN_WARNING - "Error allocating management frame.\n"); + if (!buf->mem) return -ENOMEM; - } buf->size = MGMT_FRAME_SIZE; } if (buf->pci_addr == 0) { @@ -356,14 +353,11 @@ islpci_mgt_receive(struct net_device *ndev) /* Determine frame size, skipping OID_INL_TUNNEL headers. */ size = PIMFOR_HEADER_SIZE + header->length; - frame = kmalloc(sizeof (struct islpci_mgmtframe) + size, + frame = kmalloc(sizeof(struct islpci_mgmtframe) + size, GFP_ATOMIC); - if (!frame) { - printk(KERN_WARNING - "%s: Out of memory, cannot handle oid 0x%08x\n", - ndev->name, header->oid); + if (!frame) continue; - } + frame->ndev = ndev; memcpy(&frame->buf, header, size); frame->header = (pimfor_header_t *) frame->buf; diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 598ca1cafb95..e7cf37f550d1 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -1107,12 +1107,15 @@ static int ray_get_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { ray_dev_t *local = netdev_priv(dev); + UCHAR tmp[IW_ESSID_MAX_SIZE + 1]; /* Get the essid that was set */ memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE); + memcpy(tmp, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE); + tmp[IW_ESSID_MAX_SIZE] = '\0'; /* Push it out ! 
*/ - wrqu->essid.length = strlen(extra); + wrqu->essid.length = strlen(tmp); wrqu->essid.flags = 1; /* active */ return 0; @@ -1842,6 +1845,8 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) UCHAR tmp; UCHAR cmd; UCHAR status; + UCHAR memtmp[ESSID_SIZE + 1]; + if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */ return IRQ_NONE; @@ -1901,17 +1906,21 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) break; case CCS_START_NETWORK: case CCS_JOIN_NETWORK: + memcpy(memtmp, local->sparm.b4.a_current_ess_id, + ESSID_SIZE); + memtmp[ESSID_SIZE] = '\0'; + if (status == CCS_COMMAND_COMPLETE) { if (readb (&pccs->var.start_network.net_initiated) == 1) { dev_dbg(&link->dev, "ray_cs interrupt network \"%s\" started\n", - local->sparm.b4.a_current_ess_id); + memtmp); } else { dev_dbg(&link->dev, "ray_cs interrupt network \"%s\" joined\n", - local->sparm.b4.a_current_ess_id); + memtmp); } memcpy_fromio(&local->bss_id, pccs->var.start_network.bssid, @@ -1939,12 +1948,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) if (status == CCS_START_NETWORK) { dev_dbg(&link->dev, "ray_cs interrupt network \"%s\" start failed\n", - local->sparm.b4.a_current_ess_id); + memtmp); local->timer.function = start_net; } else { dev_dbg(&link->dev, "ray_cs interrupt network \"%s\" join failed\n", - local->sparm.b4.a_current_ess_id); + memtmp); local->timer.function = join_net; } add_timer(&local->timer); diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index abe1d039be81..525fd7521dff 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -1621,11 +1621,8 @@ static void set_multicast_list(struct usbnet *usbdev) } else if (mc_count) { int i = 0; - mc_addrs = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC); + mc_addrs = kmalloc_array(mc_count, ETH_ALEN, GFP_ATOMIC); if (!mc_addrs) { - netdev_warn(usbdev->net, - "couldn't alloc %d bytes of memory\n", - mc_count * ETH_ALEN); netif_addr_unlock_bh(usbdev->net); return; } @@ -2029,7 +2026,7 @@ static bool rndis_bss_info_update(struct usbnet *usbdev, bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, timestamp, capability, beacon_interval, ie, ie_len, signal, GFP_KERNEL); - cfg80211_put_bss(bss); + cfg80211_put_bss(priv->wdev.wiphy, bss); return (bss != NULL); } @@ -2718,7 +2715,7 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid, timestamp, capability, beacon_period, ie_buf, ie_len, signal, GFP_KERNEL); - cfg80211_put_bss(bss); + cfg80211_put_bss(priv->wdev.wiphy, bss); } /* diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index a2d2bc2c7b3d..221beaaa83f1 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1185,8 +1185,14 @@ static void rt2400pci_write_beacon(struct queue_entry *entry, rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, CSR14, reg); - rt2x00queue_map_txskb(entry); - + if (rt2x00queue_map_txskb(entry)) { + ERROR(rt2x00dev, "Fail to map beacon, aborting\n"); + goto out; + } + /* + * Enable beaconing again. + */ + rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); /* * Write the TX descriptor for the beacon. */ @@ -1196,7 +1202,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry, * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); - +out: /* * Enable beaconing again. 
*/ diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 9bea10f53f0a..39edc59e8d03 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1338,7 +1338,10 @@ static void rt2500pci_write_beacon(struct queue_entry *entry, rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, CSR14, reg); - rt2x00queue_map_txskb(entry); + if (rt2x00queue_map_txskb(entry)) { + ERROR(rt2x00dev, "Fail to map beacon, aborting\n"); + goto out; + } /* * Write the TX descriptor for the beacon. @@ -1349,7 +1352,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry, * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); - +out: /* * Enable beaconing again. */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 197b4466a5d2..a658b4bc7da2 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -80,7 +80,7 @@ static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev) rt2x00_rf(rt2x00dev, RF3022)) return true; - NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n"); + WARNING(rt2x00dev, "Unknown RF chipset on rt305x\n"); return false; } @@ -1296,8 +1296,7 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PSPOLL, !(filter_flags & FIF_PSPOLL)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, - !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, 0); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BAR, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CNTL, @@ -3866,6 +3865,400 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, return rfcsr24; } +static void rt2800_init_rfcsr_305x_soc(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 0, 0x50); + rt2800_rfcsr_write(rt2x00dev, 1, 0x01); + rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); + rt2800_rfcsr_write(rt2x00dev, 3, 0x75); + rt2800_rfcsr_write(rt2x00dev, 4, 0x40); + rt2800_rfcsr_write(rt2x00dev, 5, 0x03); + rt2800_rfcsr_write(rt2x00dev, 6, 0x02); + rt2800_rfcsr_write(rt2x00dev, 7, 0x50); + rt2800_rfcsr_write(rt2x00dev, 8, 0x39); + rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 10, 0x60); + rt2800_rfcsr_write(rt2x00dev, 11, 0x21); + rt2800_rfcsr_write(rt2x00dev, 12, 0x75); + rt2800_rfcsr_write(rt2x00dev, 13, 0x75); + rt2800_rfcsr_write(rt2x00dev, 14, 0x90); + rt2800_rfcsr_write(rt2x00dev, 15, 0x58); + rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); + rt2800_rfcsr_write(rt2x00dev, 17, 0x92); + rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); + rt2800_rfcsr_write(rt2x00dev, 19, 0x02); + rt2800_rfcsr_write(rt2x00dev, 20, 0xba); + rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 22, 0x00); + rt2800_rfcsr_write(rt2x00dev, 23, 0x31); + rt2800_rfcsr_write(rt2x00dev, 24, 0x08); + rt2800_rfcsr_write(rt2x00dev, 25, 0x01); + rt2800_rfcsr_write(rt2x00dev, 26, 0x25); + rt2800_rfcsr_write(rt2x00dev, 27, 0x23); + rt2800_rfcsr_write(rt2x00dev, 28, 0x13); + rt2800_rfcsr_write(rt2x00dev, 29, 0x83); + rt2800_rfcsr_write(rt2x00dev, 30, 0x00); + rt2800_rfcsr_write(rt2x00dev, 31, 0x00); +} + +static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 4, 0x40); + rt2800_rfcsr_write(rt2x00dev, 5, 0x03); + rt2800_rfcsr_write(rt2x00dev, 6, 0x02); + rt2800_rfcsr_write(rt2x00dev, 7, 0x60); + 
rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 10, 0x41); + rt2800_rfcsr_write(rt2x00dev, 11, 0x21); + rt2800_rfcsr_write(rt2x00dev, 12, 0x7b); + rt2800_rfcsr_write(rt2x00dev, 14, 0x90); + rt2800_rfcsr_write(rt2x00dev, 15, 0x58); + rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); + rt2800_rfcsr_write(rt2x00dev, 17, 0x92); + rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); + rt2800_rfcsr_write(rt2x00dev, 19, 0x02); + rt2800_rfcsr_write(rt2x00dev, 20, 0xba); + rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 24, 0x16); + rt2800_rfcsr_write(rt2x00dev, 25, 0x01); + rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); +} + +static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 2, 0x80); + rt2800_rfcsr_write(rt2x00dev, 3, 0x08); + rt2800_rfcsr_write(rt2x00dev, 4, 0x00); + rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); + rt2800_rfcsr_write(rt2x00dev, 8, 0xf3); + rt2800_rfcsr_write(rt2x00dev, 9, 0x02); + rt2800_rfcsr_write(rt2x00dev, 10, 0x53); + rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 12, 0x46); + rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); + rt2800_rfcsr_write(rt2x00dev, 18, 0x02); + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); + rt2800_rfcsr_write(rt2x00dev, 25, 0x83); + rt2800_rfcsr_write(rt2x00dev, 26, 0x82); + rt2800_rfcsr_write(rt2x00dev, 27, 0x09); + rt2800_rfcsr_write(rt2x00dev, 29, 0x10); + rt2800_rfcsr_write(rt2x00dev, 30, 0x10); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x80); + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write(rt2x00dev, 34, 0x05); + rt2800_rfcsr_write(rt2x00dev, 35, 0x12); + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write(rt2x00dev, 38, 0x85); + rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); + rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); + rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); + rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); + rt2800_rfcsr_write(rt2x00dev, 43, 0x7b); + rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); + rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); + rt2800_rfcsr_write(rt2x00dev, 46, 0x73); + rt2800_rfcsr_write(rt2x00dev, 47, 0x00); + rt2800_rfcsr_write(rt2x00dev, 48, 0x10); + rt2800_rfcsr_write(rt2x00dev, 49, 0x98); + rt2800_rfcsr_write(rt2x00dev, 52, 0x38); + rt2800_rfcsr_write(rt2x00dev, 53, 0x00); + rt2800_rfcsr_write(rt2x00dev, 54, 0x78); + rt2800_rfcsr_write(rt2x00dev, 55, 0x43); + rt2800_rfcsr_write(rt2x00dev, 56, 0x02); + rt2800_rfcsr_write(rt2x00dev, 57, 0x80); + rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); + rt2800_rfcsr_write(rt2x00dev, 59, 0x09); + rt2800_rfcsr_write(rt2x00dev, 60, 0x45); + rt2800_rfcsr_write(rt2x00dev, 61, 0xc1); +} + +static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); + rt2800_rfcsr_write(rt2x00dev, 1, 0x23); + rt2800_rfcsr_write(rt2x00dev, 2, 0x50); + rt2800_rfcsr_write(rt2x00dev, 3, 0x18); + rt2800_rfcsr_write(rt2x00dev, 4, 0x00); + rt2800_rfcsr_write(rt2x00dev, 5, 0x00); + rt2800_rfcsr_write(rt2x00dev, 6, 0x33); + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); + rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); + rt2800_rfcsr_write(rt2x00dev, 9, 0x02); + rt2800_rfcsr_write(rt2x00dev, 10, 0xd2); + rt2800_rfcsr_write(rt2x00dev, 11, 0x42); + rt2800_rfcsr_write(rt2x00dev, 12, 0x1c); + rt2800_rfcsr_write(rt2x00dev, 13, 0x00); + rt2800_rfcsr_write(rt2x00dev, 14, 0x5a); + rt2800_rfcsr_write(rt2x00dev, 15, 0x00); + rt2800_rfcsr_write(rt2x00dev, 16, 0x01); + rt2800_rfcsr_write(rt2x00dev, 18, 0x45); + 
rt2800_rfcsr_write(rt2x00dev, 19, 0x02); + rt2800_rfcsr_write(rt2x00dev, 20, 0x00); + rt2800_rfcsr_write(rt2x00dev, 21, 0x00); + rt2800_rfcsr_write(rt2x00dev, 22, 0x00); + rt2800_rfcsr_write(rt2x00dev, 23, 0x00); + rt2800_rfcsr_write(rt2x00dev, 24, 0x00); + rt2800_rfcsr_write(rt2x00dev, 25, 0x80); + rt2800_rfcsr_write(rt2x00dev, 26, 0x00); + rt2800_rfcsr_write(rt2x00dev, 27, 0x03); + rt2800_rfcsr_write(rt2x00dev, 28, 0x03); + rt2800_rfcsr_write(rt2x00dev, 29, 0x00); + rt2800_rfcsr_write(rt2x00dev, 30, 0x10); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x80); + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write(rt2x00dev, 34, 0x01); + rt2800_rfcsr_write(rt2x00dev, 35, 0x03); + rt2800_rfcsr_write(rt2x00dev, 36, 0xbd); + rt2800_rfcsr_write(rt2x00dev, 37, 0x3c); + rt2800_rfcsr_write(rt2x00dev, 38, 0x5f); + rt2800_rfcsr_write(rt2x00dev, 39, 0xc5); + rt2800_rfcsr_write(rt2x00dev, 40, 0x33); + rt2800_rfcsr_write(rt2x00dev, 41, 0x5b); + rt2800_rfcsr_write(rt2x00dev, 42, 0x5b); + rt2800_rfcsr_write(rt2x00dev, 43, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 44, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 45, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 46, 0xdd); + rt2800_rfcsr_write(rt2x00dev, 47, 0x0d); + rt2800_rfcsr_write(rt2x00dev, 48, 0x14); + rt2800_rfcsr_write(rt2x00dev, 49, 0x00); + rt2800_rfcsr_write(rt2x00dev, 50, 0x2d); + rt2800_rfcsr_write(rt2x00dev, 51, 0x7f); + rt2800_rfcsr_write(rt2x00dev, 52, 0x00); + rt2800_rfcsr_write(rt2x00dev, 53, 0x52); + rt2800_rfcsr_write(rt2x00dev, 54, 0x1b); + rt2800_rfcsr_write(rt2x00dev, 55, 0x7f); + rt2800_rfcsr_write(rt2x00dev, 56, 0x00); + rt2800_rfcsr_write(rt2x00dev, 57, 0x52); + rt2800_rfcsr_write(rt2x00dev, 58, 0x1b); + rt2800_rfcsr_write(rt2x00dev, 59, 0x00); + rt2800_rfcsr_write(rt2x00dev, 60, 0x00); + rt2800_rfcsr_write(rt2x00dev, 61, 0x00); + rt2800_rfcsr_write(rt2x00dev, 62, 0x00); + rt2800_rfcsr_write(rt2x00dev, 63, 0x00); +} + +static void rt2800_init_rfcsr_3390(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 0, 0xa0); + rt2800_rfcsr_write(rt2x00dev, 1, 0xe1); + rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); + rt2800_rfcsr_write(rt2x00dev, 3, 0x62); + rt2800_rfcsr_write(rt2x00dev, 4, 0x40); + rt2800_rfcsr_write(rt2x00dev, 5, 0x8b); + rt2800_rfcsr_write(rt2x00dev, 6, 0x42); + rt2800_rfcsr_write(rt2x00dev, 7, 0x34); + rt2800_rfcsr_write(rt2x00dev, 8, 0x00); + rt2800_rfcsr_write(rt2x00dev, 9, 0xc0); + rt2800_rfcsr_write(rt2x00dev, 10, 0x61); + rt2800_rfcsr_write(rt2x00dev, 11, 0x21); + rt2800_rfcsr_write(rt2x00dev, 12, 0x3b); + rt2800_rfcsr_write(rt2x00dev, 13, 0xe0); + rt2800_rfcsr_write(rt2x00dev, 14, 0x90); + rt2800_rfcsr_write(rt2x00dev, 15, 0x53); + rt2800_rfcsr_write(rt2x00dev, 16, 0xe0); + rt2800_rfcsr_write(rt2x00dev, 17, 0x94); + rt2800_rfcsr_write(rt2x00dev, 18, 0x5c); + rt2800_rfcsr_write(rt2x00dev, 19, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 20, 0xb2); + rt2800_rfcsr_write(rt2x00dev, 21, 0xf6); + rt2800_rfcsr_write(rt2x00dev, 22, 0x00); + rt2800_rfcsr_write(rt2x00dev, 23, 0x14); + rt2800_rfcsr_write(rt2x00dev, 24, 0x08); + rt2800_rfcsr_write(rt2x00dev, 25, 0x3d); + rt2800_rfcsr_write(rt2x00dev, 26, 0x85); + rt2800_rfcsr_write(rt2x00dev, 27, 0x00); + rt2800_rfcsr_write(rt2x00dev, 28, 0x41); + rt2800_rfcsr_write(rt2x00dev, 29, 0x8f); + rt2800_rfcsr_write(rt2x00dev, 30, 0x20); + rt2800_rfcsr_write(rt2x00dev, 31, 0x0f); +} + +static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 0, 0x70); + rt2800_rfcsr_write(rt2x00dev, 1, 0x81); + 
rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); + rt2800_rfcsr_write(rt2x00dev, 3, 0x02); + rt2800_rfcsr_write(rt2x00dev, 4, 0x4c); + rt2800_rfcsr_write(rt2x00dev, 5, 0x05); + rt2800_rfcsr_write(rt2x00dev, 6, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 7, 0xd8); + rt2800_rfcsr_write(rt2x00dev, 9, 0xc3); + rt2800_rfcsr_write(rt2x00dev, 10, 0xf1); + rt2800_rfcsr_write(rt2x00dev, 11, 0xb9); + rt2800_rfcsr_write(rt2x00dev, 12, 0x70); + rt2800_rfcsr_write(rt2x00dev, 13, 0x65); + rt2800_rfcsr_write(rt2x00dev, 14, 0xa0); + rt2800_rfcsr_write(rt2x00dev, 15, 0x53); + rt2800_rfcsr_write(rt2x00dev, 16, 0x4c); + rt2800_rfcsr_write(rt2x00dev, 17, 0x23); + rt2800_rfcsr_write(rt2x00dev, 18, 0xac); + rt2800_rfcsr_write(rt2x00dev, 19, 0x93); + rt2800_rfcsr_write(rt2x00dev, 20, 0xb3); + rt2800_rfcsr_write(rt2x00dev, 21, 0xd0); + rt2800_rfcsr_write(rt2x00dev, 22, 0x00); + rt2800_rfcsr_write(rt2x00dev, 23, 0x3c); + rt2800_rfcsr_write(rt2x00dev, 24, 0x16); + rt2800_rfcsr_write(rt2x00dev, 25, 0x15); + rt2800_rfcsr_write(rt2x00dev, 26, 0x85); + rt2800_rfcsr_write(rt2x00dev, 27, 0x00); + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); + rt2800_rfcsr_write(rt2x00dev, 29, 0x9b); + rt2800_rfcsr_write(rt2x00dev, 30, 0x09); + rt2800_rfcsr_write(rt2x00dev, 31, 0x10); +} + +static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 2, 0x80); + rt2800_rfcsr_write(rt2x00dev, 3, 0x88); + rt2800_rfcsr_write(rt2x00dev, 5, 0x10); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); + else + rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); + rt2800_rfcsr_write(rt2x00dev, 10, 0x53); + rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 12, 0xc6); + rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); + rt2800_rfcsr_write(rt2x00dev, 14, 0x00); + rt2800_rfcsr_write(rt2x00dev, 15, 0x00); + rt2800_rfcsr_write(rt2x00dev, 16, 0x00); + rt2800_rfcsr_write(rt2x00dev, 18, 0x03); + rt2800_rfcsr_write(rt2x00dev, 19, 0x00); + + rt2800_rfcsr_write(rt2x00dev, 20, 0x00); + rt2800_rfcsr_write(rt2x00dev, 21, 0x00); + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); + rt2800_rfcsr_write(rt2x00dev, 23, 0x00); + rt2800_rfcsr_write(rt2x00dev, 24, 0x00); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 25, 0x80); + else + rt2800_rfcsr_write(rt2x00dev, 25, 0xc0); + rt2800_rfcsr_write(rt2x00dev, 26, 0x00); + rt2800_rfcsr_write(rt2x00dev, 27, 0x09); + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); + rt2800_rfcsr_write(rt2x00dev, 29, 0x10); + + rt2800_rfcsr_write(rt2x00dev, 30, 0x00); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x80); + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write(rt2x00dev, 34, 0x07); + rt2800_rfcsr_write(rt2x00dev, 35, 0x12); + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write(rt2x00dev, 37, 0x08); + rt2800_rfcsr_write(rt2x00dev, 38, 0x85); + rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); + else + rt2800_rfcsr_write(rt2x00dev, 40, 0x4b); + rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); + rt2800_rfcsr_write(rt2x00dev, 42, 0xd2); + rt2800_rfcsr_write(rt2x00dev, 43, 0x9a); + rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); + rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 46, 0x73); + else + rt2800_rfcsr_write(rt2x00dev, 46, 0x7b); + 
rt2800_rfcsr_write(rt2x00dev, 47, 0x00); + rt2800_rfcsr_write(rt2x00dev, 48, 0x10); + rt2800_rfcsr_write(rt2x00dev, 49, 0x94); + + rt2800_rfcsr_write(rt2x00dev, 52, 0x38); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 53, 0x00); + else + rt2800_rfcsr_write(rt2x00dev, 53, 0x84); + rt2800_rfcsr_write(rt2x00dev, 54, 0x78); + rt2800_rfcsr_write(rt2x00dev, 55, 0x44); + rt2800_rfcsr_write(rt2x00dev, 56, 0x22); + rt2800_rfcsr_write(rt2x00dev, 57, 0x80); + rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); + rt2800_rfcsr_write(rt2x00dev, 59, 0x63); + + rt2800_rfcsr_write(rt2x00dev, 60, 0x45); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); + else + rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); + rt2800_rfcsr_write(rt2x00dev, 62, 0x00); + rt2800_rfcsr_write(rt2x00dev, 63, 0x00); +} + +static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 1, 0x17); + rt2800_rfcsr_write(rt2x00dev, 2, 0x80); + rt2800_rfcsr_write(rt2x00dev, 3, 0x88); + rt2800_rfcsr_write(rt2x00dev, 5, 0x10); + rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); + rt2800_rfcsr_write(rt2x00dev, 10, 0x53); + rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 12, 0x46); + rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); + rt2800_rfcsr_write(rt2x00dev, 14, 0x00); + rt2800_rfcsr_write(rt2x00dev, 15, 0x00); + rt2800_rfcsr_write(rt2x00dev, 16, 0x00); + rt2800_rfcsr_write(rt2x00dev, 18, 0x03); + rt2800_rfcsr_write(rt2x00dev, 19, 0x4d); + rt2800_rfcsr_write(rt2x00dev, 20, 0x00); + rt2800_rfcsr_write(rt2x00dev, 21, 0x8d); + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); + rt2800_rfcsr_write(rt2x00dev, 23, 0x0b); + rt2800_rfcsr_write(rt2x00dev, 24, 0x44); + rt2800_rfcsr_write(rt2x00dev, 25, 0x80); + rt2800_rfcsr_write(rt2x00dev, 26, 0x82); + rt2800_rfcsr_write(rt2x00dev, 27, 0x09); + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); + rt2800_rfcsr_write(rt2x00dev, 29, 0x10); + rt2800_rfcsr_write(rt2x00dev, 30, 0x10); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x20); + rt2800_rfcsr_write(rt2x00dev, 33, 0xC0); + rt2800_rfcsr_write(rt2x00dev, 34, 0x07); + rt2800_rfcsr_write(rt2x00dev, 35, 0x12); + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write(rt2x00dev, 37, 0x08); + rt2800_rfcsr_write(rt2x00dev, 38, 0x89); + rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); + rt2800_rfcsr_write(rt2x00dev, 40, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); + rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); + rt2800_rfcsr_write(rt2x00dev, 43, 0x9b); + rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); + rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); + rt2800_rfcsr_write(rt2x00dev, 46, 0x73); + rt2800_rfcsr_write(rt2x00dev, 47, 0x0c); + rt2800_rfcsr_write(rt2x00dev, 48, 0x10); + rt2800_rfcsr_write(rt2x00dev, 49, 0x94); + rt2800_rfcsr_write(rt2x00dev, 50, 0x94); + rt2800_rfcsr_write(rt2x00dev, 51, 0x3a); + rt2800_rfcsr_write(rt2x00dev, 52, 0x48); + rt2800_rfcsr_write(rt2x00dev, 53, 0x44); + rt2800_rfcsr_write(rt2x00dev, 54, 0x38); + rt2800_rfcsr_write(rt2x00dev, 55, 0x43); + rt2800_rfcsr_write(rt2x00dev, 56, 0xa1); + rt2800_rfcsr_write(rt2x00dev, 57, 0x00); + rt2800_rfcsr_write(rt2x00dev, 58, 0x39); + rt2800_rfcsr_write(rt2x00dev, 59, 0x07); + rt2800_rfcsr_write(rt2x00dev, 60, 0x45); + rt2800_rfcsr_write(rt2x00dev, 61, 0x91); + rt2800_rfcsr_write(rt2x00dev, 62, 0x39); + rt2800_rfcsr_write(rt2x00dev, 63, 0x07); +} + static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data 
*drv_data = rt2x00dev->drv_data; @@ -3889,6 +4282,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) /* * Init RF calibration. */ + if (rt2x00_rt(rt2x00dev, RT3290) || rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392)) { @@ -3907,379 +4301,35 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); } - if (rt2x00_rt(rt2x00dev, RT3070) || - rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090)) { - rt2800_rfcsr_write(rt2x00dev, 4, 0x40); - rt2800_rfcsr_write(rt2x00dev, 5, 0x03); - rt2800_rfcsr_write(rt2x00dev, 6, 0x02); - rt2800_rfcsr_write(rt2x00dev, 7, 0x60); - rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); - rt2800_rfcsr_write(rt2x00dev, 10, 0x41); - rt2800_rfcsr_write(rt2x00dev, 11, 0x21); - rt2800_rfcsr_write(rt2x00dev, 12, 0x7b); - rt2800_rfcsr_write(rt2x00dev, 14, 0x90); - rt2800_rfcsr_write(rt2x00dev, 15, 0x58); - rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); - rt2800_rfcsr_write(rt2x00dev, 17, 0x92); - rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); - rt2800_rfcsr_write(rt2x00dev, 19, 0x02); - rt2800_rfcsr_write(rt2x00dev, 20, 0xba); - rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); - rt2800_rfcsr_write(rt2x00dev, 24, 0x16); - rt2800_rfcsr_write(rt2x00dev, 25, 0x01); - rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); - } else if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); - rt2800_rfcsr_write(rt2x00dev, 2, 0x80); - rt2800_rfcsr_write(rt2x00dev, 3, 0x08); - rt2800_rfcsr_write(rt2x00dev, 4, 0x00); - rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); - rt2800_rfcsr_write(rt2x00dev, 8, 0xf3); - rt2800_rfcsr_write(rt2x00dev, 9, 0x02); - rt2800_rfcsr_write(rt2x00dev, 10, 0x53); - rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); - rt2800_rfcsr_write(rt2x00dev, 12, 0x46); - rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); - rt2800_rfcsr_write(rt2x00dev, 18, 0x02); - rt2800_rfcsr_write(rt2x00dev, 22, 0x20); - rt2800_rfcsr_write(rt2x00dev, 25, 0x83); - rt2800_rfcsr_write(rt2x00dev, 26, 0x82); - rt2800_rfcsr_write(rt2x00dev, 27, 0x09); - rt2800_rfcsr_write(rt2x00dev, 29, 0x10); - rt2800_rfcsr_write(rt2x00dev, 30, 0x10); - rt2800_rfcsr_write(rt2x00dev, 31, 0x80); - rt2800_rfcsr_write(rt2x00dev, 32, 0x80); - rt2800_rfcsr_write(rt2x00dev, 33, 0x00); - rt2800_rfcsr_write(rt2x00dev, 34, 0x05); - rt2800_rfcsr_write(rt2x00dev, 35, 0x12); - rt2800_rfcsr_write(rt2x00dev, 36, 0x00); - rt2800_rfcsr_write(rt2x00dev, 38, 0x85); - rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); - rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); - rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); - rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); - rt2800_rfcsr_write(rt2x00dev, 43, 0x7b); - rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); - rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); - rt2800_rfcsr_write(rt2x00dev, 46, 0x73); - rt2800_rfcsr_write(rt2x00dev, 47, 0x00); - rt2800_rfcsr_write(rt2x00dev, 48, 0x10); - rt2800_rfcsr_write(rt2x00dev, 49, 0x98); - rt2800_rfcsr_write(rt2x00dev, 52, 0x38); - rt2800_rfcsr_write(rt2x00dev, 53, 0x00); - rt2800_rfcsr_write(rt2x00dev, 54, 0x78); - rt2800_rfcsr_write(rt2x00dev, 55, 0x43); - rt2800_rfcsr_write(rt2x00dev, 56, 0x02); - rt2800_rfcsr_write(rt2x00dev, 57, 0x80); - rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 59, 0x09); - rt2800_rfcsr_write(rt2x00dev, 60, 0x45); - rt2800_rfcsr_write(rt2x00dev, 61, 0xc1); - } else if (rt2x00_rt(rt2x00dev, RT3390)) { - rt2800_rfcsr_write(rt2x00dev, 0, 0xa0); - rt2800_rfcsr_write(rt2x00dev, 1, 0xe1); - rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); - rt2800_rfcsr_write(rt2x00dev, 3, 0x62); - rt2800_rfcsr_write(rt2x00dev, 4, 
0x40); - rt2800_rfcsr_write(rt2x00dev, 5, 0x8b); - rt2800_rfcsr_write(rt2x00dev, 6, 0x42); - rt2800_rfcsr_write(rt2x00dev, 7, 0x34); - rt2800_rfcsr_write(rt2x00dev, 8, 0x00); - rt2800_rfcsr_write(rt2x00dev, 9, 0xc0); - rt2800_rfcsr_write(rt2x00dev, 10, 0x61); - rt2800_rfcsr_write(rt2x00dev, 11, 0x21); - rt2800_rfcsr_write(rt2x00dev, 12, 0x3b); - rt2800_rfcsr_write(rt2x00dev, 13, 0xe0); - rt2800_rfcsr_write(rt2x00dev, 14, 0x90); - rt2800_rfcsr_write(rt2x00dev, 15, 0x53); - rt2800_rfcsr_write(rt2x00dev, 16, 0xe0); - rt2800_rfcsr_write(rt2x00dev, 17, 0x94); - rt2800_rfcsr_write(rt2x00dev, 18, 0x5c); - rt2800_rfcsr_write(rt2x00dev, 19, 0x4a); - rt2800_rfcsr_write(rt2x00dev, 20, 0xb2); - rt2800_rfcsr_write(rt2x00dev, 21, 0xf6); - rt2800_rfcsr_write(rt2x00dev, 22, 0x00); - rt2800_rfcsr_write(rt2x00dev, 23, 0x14); - rt2800_rfcsr_write(rt2x00dev, 24, 0x08); - rt2800_rfcsr_write(rt2x00dev, 25, 0x3d); - rt2800_rfcsr_write(rt2x00dev, 26, 0x85); - rt2800_rfcsr_write(rt2x00dev, 27, 0x00); - rt2800_rfcsr_write(rt2x00dev, 28, 0x41); - rt2800_rfcsr_write(rt2x00dev, 29, 0x8f); - rt2800_rfcsr_write(rt2x00dev, 30, 0x20); - rt2800_rfcsr_write(rt2x00dev, 31, 0x0f); - } else if (rt2x00_rt(rt2x00dev, RT3572)) { - rt2800_rfcsr_write(rt2x00dev, 0, 0x70); - rt2800_rfcsr_write(rt2x00dev, 1, 0x81); - rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); - rt2800_rfcsr_write(rt2x00dev, 3, 0x02); - rt2800_rfcsr_write(rt2x00dev, 4, 0x4c); - rt2800_rfcsr_write(rt2x00dev, 5, 0x05); - rt2800_rfcsr_write(rt2x00dev, 6, 0x4a); - rt2800_rfcsr_write(rt2x00dev, 7, 0xd8); - rt2800_rfcsr_write(rt2x00dev, 9, 0xc3); - rt2800_rfcsr_write(rt2x00dev, 10, 0xf1); - rt2800_rfcsr_write(rt2x00dev, 11, 0xb9); - rt2800_rfcsr_write(rt2x00dev, 12, 0x70); - rt2800_rfcsr_write(rt2x00dev, 13, 0x65); - rt2800_rfcsr_write(rt2x00dev, 14, 0xa0); - rt2800_rfcsr_write(rt2x00dev, 15, 0x53); - rt2800_rfcsr_write(rt2x00dev, 16, 0x4c); - rt2800_rfcsr_write(rt2x00dev, 17, 0x23); - rt2800_rfcsr_write(rt2x00dev, 18, 0xac); - rt2800_rfcsr_write(rt2x00dev, 19, 0x93); - rt2800_rfcsr_write(rt2x00dev, 20, 0xb3); - rt2800_rfcsr_write(rt2x00dev, 21, 0xd0); - rt2800_rfcsr_write(rt2x00dev, 22, 0x00); - rt2800_rfcsr_write(rt2x00dev, 23, 0x3c); - rt2800_rfcsr_write(rt2x00dev, 24, 0x16); - rt2800_rfcsr_write(rt2x00dev, 25, 0x15); - rt2800_rfcsr_write(rt2x00dev, 26, 0x85); - rt2800_rfcsr_write(rt2x00dev, 27, 0x00); - rt2800_rfcsr_write(rt2x00dev, 28, 0x00); - rt2800_rfcsr_write(rt2x00dev, 29, 0x9b); - rt2800_rfcsr_write(rt2x00dev, 30, 0x09); - rt2800_rfcsr_write(rt2x00dev, 31, 0x10); - } else if (rt2800_is_305x_soc(rt2x00dev)) { - rt2800_rfcsr_write(rt2x00dev, 0, 0x50); - rt2800_rfcsr_write(rt2x00dev, 1, 0x01); - rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); - rt2800_rfcsr_write(rt2x00dev, 3, 0x75); - rt2800_rfcsr_write(rt2x00dev, 4, 0x40); - rt2800_rfcsr_write(rt2x00dev, 5, 0x03); - rt2800_rfcsr_write(rt2x00dev, 6, 0x02); - rt2800_rfcsr_write(rt2x00dev, 7, 0x50); - rt2800_rfcsr_write(rt2x00dev, 8, 0x39); - rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); - rt2800_rfcsr_write(rt2x00dev, 10, 0x60); - rt2800_rfcsr_write(rt2x00dev, 11, 0x21); - rt2800_rfcsr_write(rt2x00dev, 12, 0x75); - rt2800_rfcsr_write(rt2x00dev, 13, 0x75); - rt2800_rfcsr_write(rt2x00dev, 14, 0x90); - rt2800_rfcsr_write(rt2x00dev, 15, 0x58); - rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); - rt2800_rfcsr_write(rt2x00dev, 17, 0x92); - rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); - rt2800_rfcsr_write(rt2x00dev, 19, 0x02); - rt2800_rfcsr_write(rt2x00dev, 20, 0xba); - rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); - rt2800_rfcsr_write(rt2x00dev, 22, 
0x00); - rt2800_rfcsr_write(rt2x00dev, 23, 0x31); - rt2800_rfcsr_write(rt2x00dev, 24, 0x08); - rt2800_rfcsr_write(rt2x00dev, 25, 0x01); - rt2800_rfcsr_write(rt2x00dev, 26, 0x25); - rt2800_rfcsr_write(rt2x00dev, 27, 0x23); - rt2800_rfcsr_write(rt2x00dev, 28, 0x13); - rt2800_rfcsr_write(rt2x00dev, 29, 0x83); - rt2800_rfcsr_write(rt2x00dev, 30, 0x00); - rt2800_rfcsr_write(rt2x00dev, 31, 0x00); + if (rt2800_is_305x_soc(rt2x00dev)) { + rt2800_init_rfcsr_305x_soc(rt2x00dev); return 0; - } else if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); - rt2800_rfcsr_write(rt2x00dev, 1, 0x23); - rt2800_rfcsr_write(rt2x00dev, 2, 0x50); - rt2800_rfcsr_write(rt2x00dev, 3, 0x18); - rt2800_rfcsr_write(rt2x00dev, 4, 0x00); - rt2800_rfcsr_write(rt2x00dev, 5, 0x00); - rt2800_rfcsr_write(rt2x00dev, 6, 0x33); - rt2800_rfcsr_write(rt2x00dev, 7, 0x00); - rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); - rt2800_rfcsr_write(rt2x00dev, 9, 0x02); - rt2800_rfcsr_write(rt2x00dev, 10, 0xd2); - rt2800_rfcsr_write(rt2x00dev, 11, 0x42); - rt2800_rfcsr_write(rt2x00dev, 12, 0x1c); - rt2800_rfcsr_write(rt2x00dev, 13, 0x00); - rt2800_rfcsr_write(rt2x00dev, 14, 0x5a); - rt2800_rfcsr_write(rt2x00dev, 15, 0x00); - rt2800_rfcsr_write(rt2x00dev, 16, 0x01); - rt2800_rfcsr_write(rt2x00dev, 18, 0x45); - rt2800_rfcsr_write(rt2x00dev, 19, 0x02); - rt2800_rfcsr_write(rt2x00dev, 20, 0x00); - rt2800_rfcsr_write(rt2x00dev, 21, 0x00); - rt2800_rfcsr_write(rt2x00dev, 22, 0x00); - rt2800_rfcsr_write(rt2x00dev, 23, 0x00); - rt2800_rfcsr_write(rt2x00dev, 24, 0x00); - rt2800_rfcsr_write(rt2x00dev, 25, 0x80); - rt2800_rfcsr_write(rt2x00dev, 26, 0x00); - rt2800_rfcsr_write(rt2x00dev, 27, 0x03); - rt2800_rfcsr_write(rt2x00dev, 28, 0x03); - rt2800_rfcsr_write(rt2x00dev, 29, 0x00); - rt2800_rfcsr_write(rt2x00dev, 30, 0x10); - rt2800_rfcsr_write(rt2x00dev, 31, 0x80); - rt2800_rfcsr_write(rt2x00dev, 32, 0x80); - rt2800_rfcsr_write(rt2x00dev, 33, 0x00); - rt2800_rfcsr_write(rt2x00dev, 34, 0x01); - rt2800_rfcsr_write(rt2x00dev, 35, 0x03); - rt2800_rfcsr_write(rt2x00dev, 36, 0xbd); - rt2800_rfcsr_write(rt2x00dev, 37, 0x3c); - rt2800_rfcsr_write(rt2x00dev, 38, 0x5f); - rt2800_rfcsr_write(rt2x00dev, 39, 0xc5); - rt2800_rfcsr_write(rt2x00dev, 40, 0x33); - rt2800_rfcsr_write(rt2x00dev, 41, 0x5b); - rt2800_rfcsr_write(rt2x00dev, 42, 0x5b); - rt2800_rfcsr_write(rt2x00dev, 43, 0xdb); - rt2800_rfcsr_write(rt2x00dev, 44, 0xdb); - rt2800_rfcsr_write(rt2x00dev, 45, 0xdb); - rt2800_rfcsr_write(rt2x00dev, 46, 0xdd); - rt2800_rfcsr_write(rt2x00dev, 47, 0x0d); - rt2800_rfcsr_write(rt2x00dev, 48, 0x14); - rt2800_rfcsr_write(rt2x00dev, 49, 0x00); - rt2800_rfcsr_write(rt2x00dev, 50, 0x2d); - rt2800_rfcsr_write(rt2x00dev, 51, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 52, 0x00); - rt2800_rfcsr_write(rt2x00dev, 53, 0x52); - rt2800_rfcsr_write(rt2x00dev, 54, 0x1b); - rt2800_rfcsr_write(rt2x00dev, 55, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 56, 0x00); - rt2800_rfcsr_write(rt2x00dev, 57, 0x52); - rt2800_rfcsr_write(rt2x00dev, 58, 0x1b); - rt2800_rfcsr_write(rt2x00dev, 59, 0x00); - rt2800_rfcsr_write(rt2x00dev, 60, 0x00); - rt2800_rfcsr_write(rt2x00dev, 61, 0x00); - rt2800_rfcsr_write(rt2x00dev, 62, 0x00); - rt2800_rfcsr_write(rt2x00dev, 63, 0x00); - } else if (rt2x00_rt(rt2x00dev, RT5390)) { - rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); - rt2800_rfcsr_write(rt2x00dev, 2, 0x80); - rt2800_rfcsr_write(rt2x00dev, 3, 0x88); - rt2800_rfcsr_write(rt2x00dev, 5, 0x10); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) - rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); - else - 
rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); - rt2800_rfcsr_write(rt2x00dev, 7, 0x00); - rt2800_rfcsr_write(rt2x00dev, 10, 0x53); - rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); - rt2800_rfcsr_write(rt2x00dev, 12, 0xc6); - rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); - rt2800_rfcsr_write(rt2x00dev, 14, 0x00); - rt2800_rfcsr_write(rt2x00dev, 15, 0x00); - rt2800_rfcsr_write(rt2x00dev, 16, 0x00); - rt2800_rfcsr_write(rt2x00dev, 18, 0x03); - rt2800_rfcsr_write(rt2x00dev, 19, 0x00); - - rt2800_rfcsr_write(rt2x00dev, 20, 0x00); - rt2800_rfcsr_write(rt2x00dev, 21, 0x00); - rt2800_rfcsr_write(rt2x00dev, 22, 0x20); - rt2800_rfcsr_write(rt2x00dev, 23, 0x00); - rt2800_rfcsr_write(rt2x00dev, 24, 0x00); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) - rt2800_rfcsr_write(rt2x00dev, 25, 0x80); - else - rt2800_rfcsr_write(rt2x00dev, 25, 0xc0); - rt2800_rfcsr_write(rt2x00dev, 26, 0x00); - rt2800_rfcsr_write(rt2x00dev, 27, 0x09); - rt2800_rfcsr_write(rt2x00dev, 28, 0x00); - rt2800_rfcsr_write(rt2x00dev, 29, 0x10); - - rt2800_rfcsr_write(rt2x00dev, 30, 0x00); - rt2800_rfcsr_write(rt2x00dev, 31, 0x80); - rt2800_rfcsr_write(rt2x00dev, 32, 0x80); - rt2800_rfcsr_write(rt2x00dev, 33, 0x00); - rt2800_rfcsr_write(rt2x00dev, 34, 0x07); - rt2800_rfcsr_write(rt2x00dev, 35, 0x12); - rt2800_rfcsr_write(rt2x00dev, 36, 0x00); - rt2800_rfcsr_write(rt2x00dev, 37, 0x08); - rt2800_rfcsr_write(rt2x00dev, 38, 0x85); - rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); - - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) - rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); - else - rt2800_rfcsr_write(rt2x00dev, 40, 0x4b); - rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); - rt2800_rfcsr_write(rt2x00dev, 42, 0xd2); - rt2800_rfcsr_write(rt2x00dev, 43, 0x9a); - rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); - rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) - rt2800_rfcsr_write(rt2x00dev, 46, 0x73); - else - rt2800_rfcsr_write(rt2x00dev, 46, 0x7b); - rt2800_rfcsr_write(rt2x00dev, 47, 0x00); - rt2800_rfcsr_write(rt2x00dev, 48, 0x10); - rt2800_rfcsr_write(rt2x00dev, 49, 0x94); - - rt2800_rfcsr_write(rt2x00dev, 52, 0x38); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) - rt2800_rfcsr_write(rt2x00dev, 53, 0x00); - else - rt2800_rfcsr_write(rt2x00dev, 53, 0x84); - rt2800_rfcsr_write(rt2x00dev, 54, 0x78); - rt2800_rfcsr_write(rt2x00dev, 55, 0x44); - rt2800_rfcsr_write(rt2x00dev, 56, 0x22); - rt2800_rfcsr_write(rt2x00dev, 57, 0x80); - rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 59, 0x63); - - rt2800_rfcsr_write(rt2x00dev, 60, 0x45); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) - rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); - else - rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); - rt2800_rfcsr_write(rt2x00dev, 62, 0x00); - rt2800_rfcsr_write(rt2x00dev, 63, 0x00); - } else if (rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_rfcsr_write(rt2x00dev, 1, 0x17); - rt2800_rfcsr_write(rt2x00dev, 2, 0x80); - rt2800_rfcsr_write(rt2x00dev, 3, 0x88); - rt2800_rfcsr_write(rt2x00dev, 5, 0x10); - rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); - rt2800_rfcsr_write(rt2x00dev, 7, 0x00); - rt2800_rfcsr_write(rt2x00dev, 10, 0x53); - rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); - rt2800_rfcsr_write(rt2x00dev, 12, 0x46); - rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); - rt2800_rfcsr_write(rt2x00dev, 14, 0x00); - rt2800_rfcsr_write(rt2x00dev, 15, 0x00); - rt2800_rfcsr_write(rt2x00dev, 16, 0x00); - rt2800_rfcsr_write(rt2x00dev, 18, 0x03); - rt2800_rfcsr_write(rt2x00dev, 19, 0x4d); - rt2800_rfcsr_write(rt2x00dev, 20, 0x00); - 
rt2800_rfcsr_write(rt2x00dev, 21, 0x8d); - rt2800_rfcsr_write(rt2x00dev, 22, 0x20); - rt2800_rfcsr_write(rt2x00dev, 23, 0x0b); - rt2800_rfcsr_write(rt2x00dev, 24, 0x44); - rt2800_rfcsr_write(rt2x00dev, 25, 0x80); - rt2800_rfcsr_write(rt2x00dev, 26, 0x82); - rt2800_rfcsr_write(rt2x00dev, 27, 0x09); - rt2800_rfcsr_write(rt2x00dev, 28, 0x00); - rt2800_rfcsr_write(rt2x00dev, 29, 0x10); - rt2800_rfcsr_write(rt2x00dev, 30, 0x10); - rt2800_rfcsr_write(rt2x00dev, 31, 0x80); - rt2800_rfcsr_write(rt2x00dev, 32, 0x20); - rt2800_rfcsr_write(rt2x00dev, 33, 0xC0); - rt2800_rfcsr_write(rt2x00dev, 34, 0x07); - rt2800_rfcsr_write(rt2x00dev, 35, 0x12); - rt2800_rfcsr_write(rt2x00dev, 36, 0x00); - rt2800_rfcsr_write(rt2x00dev, 37, 0x08); - rt2800_rfcsr_write(rt2x00dev, 38, 0x89); - rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); - rt2800_rfcsr_write(rt2x00dev, 40, 0x0f); - rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); - rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); - rt2800_rfcsr_write(rt2x00dev, 43, 0x9b); - rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); - rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); - rt2800_rfcsr_write(rt2x00dev, 46, 0x73); - rt2800_rfcsr_write(rt2x00dev, 47, 0x0c); - rt2800_rfcsr_write(rt2x00dev, 48, 0x10); - rt2800_rfcsr_write(rt2x00dev, 49, 0x94); - rt2800_rfcsr_write(rt2x00dev, 50, 0x94); - rt2800_rfcsr_write(rt2x00dev, 51, 0x3a); - rt2800_rfcsr_write(rt2x00dev, 52, 0x48); - rt2800_rfcsr_write(rt2x00dev, 53, 0x44); - rt2800_rfcsr_write(rt2x00dev, 54, 0x38); - rt2800_rfcsr_write(rt2x00dev, 55, 0x43); - rt2800_rfcsr_write(rt2x00dev, 56, 0xa1); - rt2800_rfcsr_write(rt2x00dev, 57, 0x00); - rt2800_rfcsr_write(rt2x00dev, 58, 0x39); - rt2800_rfcsr_write(rt2x00dev, 59, 0x07); - rt2800_rfcsr_write(rt2x00dev, 60, 0x45); - rt2800_rfcsr_write(rt2x00dev, 61, 0x91); - rt2800_rfcsr_write(rt2x00dev, 62, 0x39); - rt2800_rfcsr_write(rt2x00dev, 63, 0x07); + } + + switch (rt2x00dev->chip.rt) { + case RT3070: + case RT3071: + case RT3090: + rt2800_init_rfcsr_30xx(rt2x00dev); + break; + case RT3290: + rt2800_init_rfcsr_3290(rt2x00dev); + break; + case RT3352: + rt2800_init_rfcsr_3352(rt2x00dev); + break; + case RT3390: + rt2800_init_rfcsr_3390(rt2x00dev); + break; + case RT3572: + rt2800_init_rfcsr_3572(rt2x00dev); + break; + case RT5390: + rt2800_init_rfcsr_5390(rt2x00dev); + break; + case RT5392: + rt2800_init_rfcsr_5392(rt2x00dev); + break; } if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { @@ -4620,12 +4670,14 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) mutex_unlock(&rt2x00dev->csr_mutex); } -void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) +int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) { unsigned int i; for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8) rt2800_efuse_read(rt2x00dev, i); + + return 0; } EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); @@ -4635,11 +4687,14 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) u16 word; u8 *mac; u8 default_lna_gain; + int retval; /* * Read the EEPROM. */ - rt2800_read_eeprom(rt2x00dev); + retval = rt2800_read_eeprom(rt2x00dev); + if (retval) + return retval; /* * Start validation of the data that has been read. 
@@ -5090,8 +5145,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_REPORTS_TX_ACK_STATUS | - IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL; + IEEE80211_HW_REPORTS_TX_ACK_STATUS; /* * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices @@ -5484,7 +5538,9 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_START: ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h index a128ceadcb3e..6ec739466db4 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/rt2x00/rt2800lib.h @@ -43,7 +43,7 @@ struct rt2800_ops { const unsigned int offset, const struct rt2x00_field32 field, u32 *reg); - void (*read_eeprom)(struct rt2x00_dev *rt2x00dev); + int (*read_eeprom)(struct rt2x00_dev *rt2x00dev); bool (*hwcrypt_disabled)(struct rt2x00_dev *rt2x00dev); int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev, @@ -117,11 +117,11 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev, return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg); } -static inline void rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev) +static inline int rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev) { const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; - rt2800ops->read_eeprom(rt2x00dev); + return rt2800ops->read_eeprom(rt2x00dev); } static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) @@ -207,7 +207,7 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev); void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev); int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); -void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); +int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 9224d874bf24..48a01aa21f1c 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -90,17 +90,22 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) } #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) -static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) +static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) { void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); + if (!base_addr) + return -ENOMEM; + memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE); iounmap(base_addr); + return 0; } #else -static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) +static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) { + return -ENOMEM; } #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ @@ -135,7 +140,7 @@ static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom) rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg); } -static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) +static int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) { struct eeprom_93cx6 eeprom; u32 reg; @@ -164,6 +169,8 @@ static void 
rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, EEPROM_SIZE / sizeof(u16)); + + return 0; } static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) @@ -171,13 +178,14 @@ static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) return rt2800_efuse_detect(rt2x00dev); } -static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) +static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) { - rt2800_read_eeprom_efuse(rt2x00dev); + return rt2800_read_eeprom_efuse(rt2x00dev); } #else -static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) +static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) { + return -EOPNOTSUPP; } static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) @@ -185,8 +193,9 @@ static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) return 0; } -static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) +static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) { + return -EOPNOTSUPP; } #endif /* CONFIG_PCI */ @@ -970,14 +979,18 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) /* * Device probe functions. */ -static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev) +static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev) { + int retval; + if (rt2x00_is_soc(rt2x00dev)) - rt2800pci_read_eeprom_soc(rt2x00dev); + retval = rt2800pci_read_eeprom_soc(rt2x00dev); else if (rt2800pci_efuse_detect(rt2x00dev)) - rt2800pci_read_eeprom_efuse(rt2x00dev); + retval = rt2800pci_read_eeprom_efuse(rt2x00dev); else - rt2800pci_read_eeprom_pci(rt2x00dev); + retval = rt2800pci_read_eeprom_pci(rt2x00dev); + + return retval; } static const struct ieee80211_ops rt2800pci_mac80211_ops = { @@ -1139,6 +1152,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { { PCI_DEVICE(0x1814, 0x3562) }, { PCI_DEVICE(0x1814, 0x3592) }, { PCI_DEVICE(0x1814, 0x3593) }, + { PCI_DEVICE(0x1814, 0x359f) }, #endif #ifdef CONFIG_RT2800PCI_RT53XX { PCI_DEVICE(0x1814, 0x5360) }, diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 5c149b58ab46..098613ed93fb 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -540,9 +540,9 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID); if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) { - WARNING(entry->queue->rt2x00dev, - "TX status report missed for queue %d entry %d\n", - entry->queue->qid, entry->entry_idx); + DEBUG(entry->queue->rt2x00dev, + "TX status report missed for queue %d entry %d\n", + entry->queue->qid, entry->entry_idx); return TXDONE_UNKNOWN; } @@ -735,13 +735,17 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, /* * Device probe functions. 
*/ -static void rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev) +static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev) { + int retval; + if (rt2800_efuse_detect(rt2x00dev)) - rt2800_read_eeprom_efuse(rt2x00dev); + retval = rt2800_read_eeprom_efuse(rt2x00dev); else - rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, - EEPROM_SIZE); + retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, + EEPROM_SIZE); + + return retval; } static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) @@ -964,6 +968,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x07d1, 0x3c13) }, { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c16) }, + { USB_DEVICE(0x07d1, 0x3c17) }, { USB_DEVICE(0x2001, 0x3c1b) }, /* Draytek */ { USB_DEVICE(0x07fa, 0x7712) }, @@ -1094,9 +1099,11 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x15a9, 0x0006) }, /* Sweex */ { USB_DEVICE(0x177f, 0x0153) }, + { USB_DEVICE(0x177f, 0x0164) }, { USB_DEVICE(0x177f, 0x0302) }, { USB_DEVICE(0x177f, 0x0313) }, { USB_DEVICE(0x177f, 0x0323) }, + { USB_DEVICE(0x177f, 0x0324) }, /* U-Media */ { USB_DEVICE(0x157e, 0x300e) }, { USB_DEVICE(0x157e, 0x3013) }, @@ -1111,6 +1118,7 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Zyxel */ { USB_DEVICE(0x0586, 0x3416) }, { USB_DEVICE(0x0586, 0x3418) }, + { USB_DEVICE(0x0586, 0x341a) }, { USB_DEVICE(0x0586, 0x341e) }, { USB_DEVICE(0x0586, 0x343e) }, #ifdef CONFIG_RT2800USB_RT33XX @@ -1127,6 +1135,9 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x148f, 0x8070) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x0050) }, + /* Sweex */ + { USB_DEVICE(0x177f, 0x0163) }, + { USB_DEVICE(0x177f, 0x0165) }, #endif #ifdef CONFIG_RT2800USB_RT35XX /* Allwin */ @@ -1162,6 +1173,7 @@ static struct usb_device_id rt2800usb_device_table[] = { #ifdef CONFIG_RT2800USB_RT53XX /* Arcadyan */ { USB_DEVICE(0x043e, 0x7a12) }, + { USB_DEVICE(0x043e, 0x7a32) }, /* Azurewave */ { USB_DEVICE(0x13d3, 0x3329) }, { USB_DEVICE(0x13d3, 0x3365) }, @@ -1173,16 +1185,20 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x2001, 0x3c1e) }, /* LG innotek */ { USB_DEVICE(0x043e, 0x7a22) }, + { USB_DEVICE(0x043e, 0x7a42) }, /* Panasonic */ { USB_DEVICE(0x04da, 0x1801) }, { USB_DEVICE(0x04da, 0x1800) }, + { USB_DEVICE(0x04da, 0x23f6) }, /* Philips */ { USB_DEVICE(0x0471, 0x2104) }, + { USB_DEVICE(0x0471, 0x2126) }, + { USB_DEVICE(0x0471, 0x2180) }, + { USB_DEVICE(0x0471, 0x2181) }, + { USB_DEVICE(0x0471, 0x2182) }, /* Ralink */ { USB_DEVICE(0x148f, 0x5370) }, { USB_DEVICE(0x148f, 0x5372) }, - /* Unknown */ - { USB_DEVICE(0x04da, 0x23f6) }, #endif #ifdef CONFIG_RT2800USB_UNKNOWN /* @@ -1203,10 +1219,15 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0b05, 0x1760) }, { USB_DEVICE(0x0b05, 0x1761) }, { USB_DEVICE(0x0b05, 0x1790) }, + { USB_DEVICE(0x0b05, 0x17a7) }, /* AzureWave */ { USB_DEVICE(0x13d3, 0x3262) }, { USB_DEVICE(0x13d3, 0x3284) }, { USB_DEVICE(0x13d3, 0x3322) }, + { USB_DEVICE(0x13d3, 0x3340) }, + { USB_DEVICE(0x13d3, 0x3399) }, + { USB_DEVICE(0x13d3, 0x3400) }, + { USB_DEVICE(0x13d3, 0x3401) }, /* Belkin */ { USB_DEVICE(0x050d, 0x1003) }, /* Buffalo */ @@ -1219,13 +1240,17 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x18c5, 0x0008) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c0b) }, - { USB_DEVICE(0x07d1, 0x3c17) }, /* Encore */ { USB_DEVICE(0x203d, 0x14a1) }, + /* EnGenius */ + { USB_DEVICE(0x1740, 0x0600) }, + { USB_DEVICE(0x1740, 0x0602) }, /* Gemtek */ { 
USB_DEVICE(0x15a9, 0x0010) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x800c) }, + /* Hercules */ + { USB_DEVICE(0x06f8, 0xe036) }, /* Huawei */ { USB_DEVICE(0x148f, 0xf101) }, /* I-O DATA */ @@ -1252,13 +1277,17 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0df6, 0x004a) }, { USB_DEVICE(0x0df6, 0x004d) }, { USB_DEVICE(0x0df6, 0x0053) }, + { USB_DEVICE(0x0df6, 0x0069) }, + { USB_DEVICE(0x0df6, 0x006f) }, /* SMC */ { USB_DEVICE(0x083a, 0xa512) }, { USB_DEVICE(0x083a, 0xc522) }, { USB_DEVICE(0x083a, 0xd522) }, { USB_DEVICE(0x083a, 0xf511) }, - /* Zyxel */ - { USB_DEVICE(0x0586, 0x341a) }, + /* Sweex */ + { USB_DEVICE(0x177f, 0x0254) }, + /* TP-LINK */ + { USB_DEVICE(0xf201, 0x5370) }, #endif { 0, } }; diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 0751b35ef6dc..086abb403a4f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -88,11 +88,9 @@ #define ERROR_PROBE(__msg, __args...) \ DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args) #define WARNING(__dev, __msg, __args...) \ - DEBUG_PRINTK(__dev, KERN_WARNING, "Warning", __msg, ##__args) -#define NOTICE(__dev, __msg, __args...) \ - DEBUG_PRINTK(__dev, KERN_NOTICE, "Notice", __msg, ##__args) + DEBUG_PRINTK_MSG(__dev, KERN_WARNING, "Warning", __msg, ##__args) #define INFO(__dev, __msg, __args...) \ - DEBUG_PRINTK(__dev, KERN_INFO, "Info", __msg, ##__args) + DEBUG_PRINTK_MSG(__dev, KERN_INFO, "Info", __msg, ##__args) #define DEBUG(__dev, __msg, __args...) \ DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args) #define EEPROM(__dev, __msg, __args...) \ @@ -1016,6 +1014,26 @@ struct rt2x00_dev { * Protect the interrupt mask register. */ spinlock_t irqmask_lock; + + /* + * List of BlockAckReq TX entries that need driver BlockAck processing. + */ + struct list_head bar_list; + spinlock_t bar_list_lock; +}; + +struct rt2x00_bar_list_entry { + struct list_head list; + struct rcu_head head; + + struct queue_entry *entry; + int block_acked; + + /* Relevant parts of the IEEE80211 BAR header */ + __u8 ra[6]; + __u8 ta[6]; + __le16 control; + __le16 start_seq_num; }; /* @@ -1151,8 +1169,10 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev) /** * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes. * @entry: Pointer to &struct queue_entry + * + * Returns -ENOMEM if mapping fail, 0 otherwise. */ -void rt2x00queue_map_txskb(struct queue_entry *entry); +int rt2x00queue_map_txskb(struct queue_entry *entry); /** * rt2x00queue_unmap_skb - Unmap a skb from DMA. diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 44f8b3f3cbed..1031db66474a 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -271,6 +271,50 @@ void rt2x00lib_dmadone(struct queue_entry *entry) } EXPORT_SYMBOL_GPL(rt2x00lib_dmadone); +static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct ieee80211_bar *bar = (void *) entry->skb->data; + struct rt2x00_bar_list_entry *bar_entry; + int ret; + + if (likely(!ieee80211_is_back_req(bar->frame_control))) + return 0; + + /* + * Unlike all other frames, the status report for BARs does + * not directly come from the hardware as it is incapable of + * matching a BA to a previously send BAR. The hardware will + * report all BARs as if they weren't acked at all. 
+ * + * Instead the RX-path will scan for incoming BAs and set the + * block_acked flag if it sees one that was likely caused by + * a BAR from us. + * + * Remove remaining BARs here and return their status for + * TX done processing. + */ + ret = 0; + rcu_read_lock(); + list_for_each_entry_rcu(bar_entry, &rt2x00dev->bar_list, list) { + if (bar_entry->entry != entry) + continue; + + spin_lock_bh(&rt2x00dev->bar_list_lock); + /* Return whether this BAR was blockacked or not */ + ret = bar_entry->block_acked; + /* Remove the BAR from our checklist */ + list_del_rcu(&bar_entry->list); + spin_unlock_bh(&rt2x00dev->bar_list_lock); + kfree_rcu(bar_entry, head); + + break; + } + rcu_read_unlock(); + + return ret; +} + void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { @@ -324,9 +368,12 @@ void rt2x00lib_txdone(struct queue_entry *entry, rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb); /* - * Determine if the frame has been successfully transmitted. + * Determine if the frame has been successfully transmitted and + * remove BARs from our check list while checking for their + * TX status. */ success = + rt2x00lib_txdone_bar_status(entry) || test_bit(TXDONE_SUCCESS, &txdesc->flags) || test_bit(TXDONE_UNKNOWN, &txdesc->flags); @@ -491,6 +538,50 @@ static void rt2x00lib_sleep(struct work_struct *work) IEEE80211_CONF_CHANGE_PS); } +static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00_bar_list_entry *entry; + struct ieee80211_bar *ba = (void *)skb->data; + + if (likely(!ieee80211_is_back(ba->frame_control))) + return; + + if (rxdesc->size < sizeof(*ba) + FCS_LEN) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(entry, &rt2x00dev->bar_list, list) { + + if (ba->start_seq_num != entry->start_seq_num) + continue; + +#define TID_CHECK(a, b) ( \ + ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \ + ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \ + + if (!TID_CHECK(ba->control, entry->control)) + continue; + +#undef TID_CHECK + + if (compare_ether_addr(ba->ra, entry->ta)) + continue; + + if (compare_ether_addr(ba->ta, entry->ra)) + continue; + + /* Mark BAR since we received the according BA */ + spin_lock_bh(&rt2x00dev->bar_list_lock); + entry->block_acked = 1; + spin_unlock_bh(&rt2x00dev->bar_list_lock); + break; + } + rcu_read_unlock(); + +} + static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) @@ -674,6 +765,12 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc); /* + * Check for incoming BlockAcks to match to the BlockAckReqs + * we've send out. + */ + rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc); + + /* * Update extra components */ rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); @@ -1139,7 +1236,8 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev) */ if_limit = &rt2x00dev->if_limits_ap; if_limit->max = rt2x00dev->ops->max_ap_intf; - if_limit->types = BIT(NL80211_IFTYPE_AP); + if_limit->types = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT); /* * Build up AP interface combinations structure. 
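The BA-to-BAR matching rule used by rt2x00lib_rxdone_check_ba() above — identical TID bits in the BAR control field, identical starting sequence number, and the BlockAck's RA/TA being the stored BAR's TA/RA — can be exercised as a small stand-alone check. The sketch below is only an illustration: the struct is a simplified stand-in for struct ieee80211_bar / struct rt2x00_bar_list_entry, the 0xf000 mask is assumed to correspond to IEEE80211_BAR_CTRL_TID_INFO_MASK, and endianness handling is left out.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Simplified stand-in for the fields the driver records per sent BAR and
 * reads back from a received BlockAck. */
struct bar_record {
	uint8_t  ra[6];
	uint8_t  ta[6];
	uint16_t control;
	uint16_t start_seq_num;
};

#define BAR_CTRL_TID_INFO_MASK	0xf000u	/* assumed TID bits of the BAR control field */

static int ba_matches_bar(const struct bar_record *ba,
			  const struct bar_record *bar)
{
	if (ba->start_seq_num != bar->start_seq_num)
		return 0;
	/* Only the TID bits of the control field have to agree. */
	if ((ba->control & BAR_CTRL_TID_INFO_MASK) !=
	    (bar->control & BAR_CTRL_TID_INFO_MASK))
		return 0;
	/* Addresses are swapped: the BA comes back from the BAR's receiver. */
	if (memcmp(ba->ra, bar->ta, 6) || memcmp(ba->ta, bar->ra, 6))
		return 0;
	return 1;
}

int main(void)
{
	struct bar_record bar = {
		.ra = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.ta = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb },
		.control = 0x2000,
		.start_seq_num = 0x0120,
	};
	struct bar_record ba = {
		.ra = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb },	/* the BAR's TA */
		.ta = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },	/* the BAR's RA */
		.control = 0x2004,	/* other control bits differ, TID is the same */
		.start_seq_num = 0x0120,
	};

	printf("match: %d\n", ba_matches_bar(&ba, &bar));	/* prints "match: 1" */
	return 0;
}

In the driver the equivalent comparisons run while walking bar_list under rcu_read_lock(); bar_list_lock is only taken for the short updates, which keeps the RX hot path lock-free in the common case where no BAR is outstanding.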
@@ -1183,6 +1281,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) spin_lock_init(&rt2x00dev->irqmask_lock); mutex_init(&rt2x00dev->csr_mutex); + INIT_LIST_HEAD(&rt2x00dev->bar_list); + spin_lock_init(&rt2x00dev->bar_list_lock); set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); @@ -1347,7 +1447,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); #ifdef CONFIG_PM int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) { - NOTICE(rt2x00dev, "Going to sleep.\n"); + DEBUG(rt2x00dev, "Going to sleep.\n"); /* * Prevent mac80211 from accessing driver while suspended. @@ -1387,7 +1487,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_suspend); int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) { - NOTICE(rt2x00dev, "Waking up.\n"); + DEBUG(rt2x00dev, "Waking up.\n"); /* * Restore/enable extra components. diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index ed7a1bb3f245..20c6eccce5aa 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -731,9 +731,9 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, queue->aifs = params->aifs; queue->txop = params->txop; - INFO(rt2x00dev, - "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n", - queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop); + DEBUG(rt2x00dev, + "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n", + queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop); return 0; } diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index e488b944a034..4d91795dc6a2 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -87,24 +87,35 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) skbdesc->entry = entry; if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) { - skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, - skb->data, - skb->len, - DMA_FROM_DEVICE); + dma_addr_t skb_dma; + + skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) { + dev_kfree_skb_any(skb); + return NULL; + } + + skbdesc->skb_dma = skb_dma; skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; } return skb; } -void rt2x00queue_map_txskb(struct queue_entry *entry) +int rt2x00queue_map_txskb(struct queue_entry *entry) { struct device *dev = entry->queue->rt2x00dev->dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); skbdesc->skb_dma = dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma))) + return -ENOMEM; + skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; + return 0; } EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); @@ -343,10 +354,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, * when using more then one tx stream (>MCS7). */ if (sta && txdesc->u.ht.mcs > 7 && - ((sta->ht_cap.cap & - IEEE80211_HT_CAP_SM_PS) >> - IEEE80211_HT_CAP_SM_PS_SHIFT) == - WLAN_HT_CAP_SM_PS_DYNAMIC) + sta->smps_mode == IEEE80211_SMPS_DYNAMIC) __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); } else { txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs); @@ -545,8 +553,9 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry, /* * Map the skb to DMA. 
*/ - if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) - rt2x00queue_map_txskb(entry); + if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) && + rt2x00queue_map_txskb(entry)) + return -ENOMEM; return 0; } @@ -582,6 +591,48 @@ static void rt2x00queue_kick_tx_queue(struct data_queue *queue, queue->rt2x00dev->ops->lib->kick_queue(queue); } +static void rt2x00queue_bar_check(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct ieee80211_bar *bar = (void *) (entry->skb->data + + rt2x00dev->ops->extra_tx_headroom); + struct rt2x00_bar_list_entry *bar_entry; + + if (likely(!ieee80211_is_back_req(bar->frame_control))) + return; + + bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC); + + /* + * If the alloc fails we still send the BAR out but just don't track + * it in our bar list. And as a result we will report it to mac80211 + * back as failed. + */ + if (!bar_entry) + return; + + bar_entry->entry = entry; + bar_entry->block_acked = 0; + + /* + * Copy the relevant parts of the 802.11 BAR into out check list + * such that we can use RCU for less-overhead in the RX path since + * sending BARs and processing the according BlockAck should be + * the exception. + */ + memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra)); + memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta)); + bar_entry->control = bar->control; + bar_entry->start_seq_num = bar->start_seq_num; + + /* + * Insert BAR into our BAR check list. + */ + spin_lock_bh(&rt2x00dev->bar_list_lock); + list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list); + spin_unlock_bh(&rt2x00dev->bar_list_lock); +} + int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, bool local) { @@ -680,6 +731,11 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, goto out; } + /* + * Put BlockAckReqs into our check list for driver BA processing. + */ + rt2x00queue_bar_check(entry); + set_bit(ENTRY_DATA_PENDING, &entry->flags); rt2x00queue_index_inc(entry, Q_INDEX); diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig index b80bc4612581..b6aa0c40658f 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/rtlwifi/Kconfig @@ -1,8 +1,26 @@ +config RTLWIFI + tristate "Realtek wireless card support" + depends on MAC80211 + select FW_LOADER + ---help--- + This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE + drivers. This module does nothing by itself - the various front-end + drivers need to be enabled to support any desired devices. + + If you choose to build as a module, it'll be called rtlwifi. + +config RTLWIFI_DEBUG + bool "Debugging output for rtlwifi driver family" + depends on RTLWIFI + default y + ---help--- + To use the module option that sets the dynamic-debugging level for, + the front-end driver, this parameter must be "Y". For memory-limited + systems, choose "N". If in doubt, choose "Y". + config RTL8192CE tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter" - depends on MAC80211 && PCI - select FW_LOADER - select RTLWIFI + depends on RTLWIFI && PCI select RTL8192C_COMMON ---help--- This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe @@ -12,9 +30,7 @@ config RTL8192CE config RTL8192SE tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter" - depends on MAC80211 && PCI - select FW_LOADER - select RTLWIFI + depends on RTLWIFI && PCI ---help--- This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe wireless network adapters. 
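The rt2x00queue_map_txskb() change in the rt2x00queue.c hunks above makes TX DMA mapping fallible, so callers have to unwind instead of handing an unmapped address to the hardware. A minimal kernel-style sketch of that caller pattern follows; example_write_tx_data() is a hypothetical name and the descriptor handling is elided — it only mirrors the error propagation that rt2x00queue_write_tx_data() adopts in this patch.

/* Hypothetical caller: drop the frame with -ENOMEM when the mapping
 * fails instead of programming a bad DMA address into the descriptor. */
static int example_write_tx_data(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	int ret;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		ret = rt2x00queue_map_txskb(entry);	/* may now return -ENOMEM */
		if (ret)
			return ret;
	}

	/* ... write skbdesc->skb_dma into the hardware TX descriptor ... */
	return 0;
}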
@@ -23,9 +39,7 @@ config RTL8192SE config RTL8192DE tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" - depends on MAC80211 && PCI - select FW_LOADER - select RTLWIFI + depends on RTLWIFI && PCI ---help--- This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe wireless network adapters. @@ -34,9 +48,7 @@ config RTL8192DE config RTL8723AE tristate "Realtek RTL8723AE PCIe Wireless Network Adapter" - depends on MAC80211 && PCI && EXPERIMENTAL - select FW_LOADER - select RTLWIFI + depends on RTLWIFI && PCI ---help--- This is the driver for Realtek RTL8723AE 802.11n PCIe wireless network adapters. @@ -45,9 +57,7 @@ config RTL8723AE config RTL8192CU tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" - depends on MAC80211 && USB - select FW_LOADER - select RTLWIFI + depends on RTLWIFI && USB select RTL8192C_COMMON ---help--- This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB @@ -55,16 +65,6 @@ config RTL8192CU If you choose to build it as a module, it will be called rtl8192cu -config RTLWIFI - tristate - depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE - default m - -config RTLWIFI_DEBUG - bool "Additional debugging output" - depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE - default y - config RTL8192C_COMMON tristate depends on RTL8192CE || RTL8192CU diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 0f8b05185eda..99c5cea3fe21 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -523,8 +523,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_STATION) bw_40 = mac->bw_40; else if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) - bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; + mac->opmode == NL80211_IFTYPE_ADHOC) + bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; if (bw_40 && sgi_40) tcb_desc->use_shortgi = true; @@ -634,8 +634,7 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, return; if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { - if (!(sta->ht_cap.ht_supported) || - !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + if (sta->bandwidth == IEEE80211_STA_RX_BW_20) return; } else if (mac->opmode == NL80211_IFTYPE_STATION) { if (!mac->bw_40 || !(sta->ht_cap.ht_supported)) diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index be33aa14c8af..d3ce9fbef00e 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -879,7 +879,9 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw, "IEEE80211_AMPDU_TX_START: TID:%d\n", tid); return rtl_tx_agg_start(hw, sta, tid, ssn); break; - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid); return rtl_tx_agg_stop(hw, sta, tid); diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c index c1e065f136ba..f9f059dadb73 100644 --- a/drivers/net/wireless/rtlwifi/rc.c +++ b/drivers/net/wireless/rtlwifi/rc.c @@ -116,9 +116,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, if (txrc->short_preamble) rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) { - if (sta && 
(sta->ht_cap.cap & - IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + mac->opmode == NL80211_IFTYPE_ADHOC) { + if (sta && (sta->bandwidth >= IEEE80211_STA_RX_BW_40)) rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; } else { if (mac->bw_40) @@ -223,13 +222,6 @@ static void rtl_rate_init(void *ppriv, { } -static void rtl_rate_update(void *ppriv, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - u32 changed) -{ -} - static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { @@ -275,7 +267,6 @@ static struct rate_control_ops rtl_rate_ops = { .alloc_sta = rtl_rate_alloc_sta, .free_sta = rtl_rate_free_sta, .rate_init = rtl_rate_init, - .rate_update = rtl_rate_update, .tx_status = rtl_tx_status, .get_rate = rtl_get_rate, }; diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c index c1608cddc529..d7d0d4948b01 100644 --- a/drivers/net/wireless/rtlwifi/regd.c +++ b/drivers/net/wireless/rtlwifi/regd.c @@ -158,8 +158,6 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, const struct ieee80211_reg_rule *reg_rule; struct ieee80211_channel *ch; unsigned int i; - u32 bandwidth = 0; - int r; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { @@ -174,9 +172,8 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, (ch->flags & IEEE80211_CHAN_RADAR)) continue; if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { - r = freq_reg_info(wiphy, ch->center_freq, - bandwidth, ®_rule); - if (r) + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (IS_ERR(reg_rule)) continue; /* @@ -211,8 +208,6 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy, struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; const struct ieee80211_reg_rule *reg_rule; - u32 bandwidth = 0; - int r; if (!wiphy->bands[IEEE80211_BAND_2GHZ]) return; @@ -240,16 +235,16 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy, */ ch = &sband->channels[11]; /* CH 12 */ - r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule); - if (!r) { + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (!IS_ERR(reg_rule)) { if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } ch = &sband->channels[12]; /* CH 13 */ - r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule); - if (!r) { + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (!IS_ERR(reg_rule)) { if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; @@ -303,9 +298,9 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy, return; } -static int _rtl_reg_notifier_apply(struct wiphy *wiphy, - struct regulatory_request *request, - struct rtl_regulatory *reg) +static void _rtl_reg_notifier_apply(struct wiphy *wiphy, + struct regulatory_request *request, + struct rtl_regulatory *reg) { /* We always apply this */ _rtl_reg_apply_radar_flags(wiphy); @@ -319,8 +314,6 @@ static int _rtl_reg_notifier_apply(struct wiphy *wiphy, _rtl_reg_apply_world_flags(wiphy, request->initiator, reg); break; } - - return 0; } static const struct ieee80211_regdomain *_rtl_regdomain_select( @@ -353,9 +346,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg, struct wiphy *wiphy, - int (*reg_notifier) (struct wiphy *wiphy, - struct regulatory_request * - request)) + void (*reg_notifier) (struct wiphy *wiphy, + 
struct regulatory_request * + request)) { const struct ieee80211_regdomain *regd; @@ -384,7 +377,7 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode) } int rtl_regd_init(struct ieee80211_hw *hw, - int (*reg_notifier) (struct wiphy *wiphy, + void (*reg_notifier) (struct wiphy *wiphy, struct regulatory_request *request)) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -426,12 +419,12 @@ int rtl_regd_init(struct ieee80211_hw *hw, return 0; } -int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n"); - return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd); + _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd); } diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h index 70ef2f418a44..4e1f4f00e6e9 100644 --- a/drivers/net/wireless/rtlwifi/regd.h +++ b/drivers/net/wireless/rtlwifi/regd.h @@ -55,7 +55,7 @@ enum country_code_type_t { }; int rtl_regd_init(struct ieee80211_hw *hw, - int (*reg_notifier) (struct wiphy *wiphy, - struct regulatory_request *request)); -int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); + void (*reg_notifier) (struct wiphy *wiphy, + struct regulatory_request *request)); +void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c index 1cdf5a271c9f..b793a659a465 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -669,7 +669,8 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw u8 thermalvalue, delta, delta_lck, delta_iqk; long ele_a, ele_d, temp_cck, val_x, value32; long val_y, ele_c = 0; - u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0; + u8 ofdm_index[2], ofdm_index_old[2], cck_index_old = 0; + s8 cck_index = 0; int i; bool is2t = IS_92C_SERIAL(rtlhal->version); s8 txpwr_level[2] = {0, 0}; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index d1f34f6ffbdf..1b65db7fd651 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -1846,9 +1846,9 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - ? 1 : 0; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; + u8 curshortgi_40mhz = curtxbw_40mhz && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
1 : 0; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index c31795e379f7..b9b1a6e0b16e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -488,7 +488,7 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *praddr; __le16 fc; u16 type, c_fc; - bool packet_matchbssid, packet_toself, packet_beacon; + bool packet_matchbssid, packet_toself, packet_beacon = false; tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; @@ -626,8 +626,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & - IEEE80211_HT_CAP_SUP_WIDTH_20_40; + bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 32ff959a0251..85b6bdb163c0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -1084,7 +1084,7 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *praddr; __le16 fc; u16 type, cpu_fc; - bool packet_matchbssid, packet_toself, packet_beacon; + bool packet_matchbssid, packet_toself, packet_beacon = false; tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; hdr = (struct ieee80211_hdr *)tmp_buf; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index b7e6607e6b6d..a73a17bc56dd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -76,7 +76,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) GFP_KERNEL, hw, rtl_fw_cb); - return 0; + return err; } static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw) @@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, /* RTL8188CUS-VL */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)}, /* 8188 Combo for BC4 */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, @@ -363,9 +364,15 @@ static struct usb_device_id rtl8192c_usb_ids[] = { MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids); +static int rtl8192cu_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + return rtl_usb_probe(intf, id, &rtl92cu_hal_cfg); +} + static struct usb_driver rtl8192cu_driver = { .name = "rtl8192cu", - .probe = rtl_usb_probe, + .probe = rtl8192cu_probe, .disconnect = rtl_usb_disconnect, .id_table = rtl8192c_usb_ids, diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c index fd8df233ff22..5251fb8a111e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c @@ -841,9 +841,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( long ele_a = 0, ele_d, temp_cck, val_x, value32; long val_y, ele_c = 0; u8 ofdm_index[2]; - u8 cck_index = 0; + s8 cck_index = 0; u8 ofdm_index_old[2]; - u8 cck_index_old = 0; + s8 cck_index_old = 0; u8 index; int i; bool is2t = IS_92D_SINGLEPHY(rtlhal->version); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c index f4051f4f0390..aa5b42521bb4 100644 --- 
a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c @@ -1970,8 +1970,7 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - ? 1 : 0; + u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index a0fbf284420e..941080e03c06 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c @@ -452,7 +452,7 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *praddr; u16 type, cfc; __le16 fc; - bool packet_matchbssid, packet_toself, packet_beacon; + bool packet_matchbssid, packet_toself, packet_beacon = false; tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; hdr = (struct ieee80211_hdr *)tmp_buf; @@ -574,8 +574,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & - IEEE80211_HT_CAP_SUP_WIDTH_20_40; + bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c index 28526a7361f5..084e7773bce2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c @@ -2085,8 +2085,7 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index = 0; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - ? 1 : 0; + u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 206561d7282f..7b0a2e75b8b8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -480,7 +480,7 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *praddr; __le16 fc; u16 type, cfc; - bool packet_matchbssid, packet_toself, packet_beacon; + bool packet_matchbssid, packet_toself, packet_beacon = false; tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; @@ -621,8 +621,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & - IEEE80211_HT_CAP_SUP_WIDTH_20_40; + bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c index f55b1767ef57..35cb8f83eed4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c @@ -252,7 +252,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, u16 box_reg = 0, box_extreg = 0; u8 u1tmp; bool isfw_rd = false; - bool bwrite_sucess = false; + bool bwrite_success = false; u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[2]; @@ -291,7 +291,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, } } - while (!bwrite_sucess) { + while (!bwrite_success) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, @@ -429,7 +429,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, break; } - bwrite_sucess = true; + bwrite_success = true; rtlhal->last_hmeboxnum = boxnum + 1; if (rtlhal->last_hmeboxnum == 4) @@ -512,7 +512,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; - u8 own; unsigned long flags; struct sk_buff *pskb = NULL; @@ -525,7 +524,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw, spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); pdesc = &ring->desc[0]; - own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN); rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c index 887d521fe690..68c28340f791 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c @@ -1433,7 +1433,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - u8 bt_retry_cnt; u8 bt_info_original; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex] Get bt info by fw!!\n"); @@ -1445,7 +1444,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) "[BTCoex] c2h for btInfo not rcvd yet!!\n"); } - bt_retry_cnt = rtlhal->hal_coex_8723.bt_retry_cnt; bt_info_original = rtlhal->hal_coex_8723.c2h_bt_info_original; /* when bt inquiry or page scan, we have to set h2c 0x25 diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c index 0a8c03863fb2..9a0c71c2e15e 100644 --- 
a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c @@ -703,11 +703,9 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 reg_bw_opmode; - u32 reg_ratr, reg_prsr; + u32 reg_prsr; reg_bw_opmode = BW_OPMODE_20MHZ; - reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | - RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS; reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8); @@ -1868,8 +1866,7 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - ? 1 : 0; + u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? @@ -2030,7 +2027,7 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); - enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; + enum rf_pwrstate e_rfpowerstate_toset; u8 u1tmp; bool actuallyset = false; @@ -2049,8 +2046,6 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) spin_unlock(&rtlpriv->locks.rf_ps_lock); } - cur_rfstate = ppsc->rfpwr_state; - rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2, rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1))); diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c index 3d8536bb0d2b..eafbb18dd48e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c @@ -614,17 +614,11 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); int i; - bool rtstatus = true; u32 *radioa_array_table; - u32 *radiob_array_table; - u16 radioa_arraylen, radiob_arraylen; + u16 radioa_arraylen; radioa_arraylen = Rtl8723ERADIOA_1TARRAYLENGTH; radioa_array_table = RTL8723E_RADIOA_1TARRAY; - radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH; - radiob_array_table = RTL8723E_RADIOB_1TARRAY; - - rtstatus = true; switch (rfpath) { case RF90_PATH_A: @@ -1531,11 +1525,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, 0x522, 0x550, 0x551, 0x040 }; const u32 retrycount = 2; - u32 bbvalue; if (t == 0) { - bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD); - phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16); phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); } @@ -1712,8 +1703,7 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) long result[4][8]; u8 i, final_candidate; bool patha_ok, pathb_ok; - long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, - reg_ecc, reg_tmp = 0; + long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_tmp = 0; bool is12simular, is13simular, is23simular; bool start_conttx = false, singletone = false; u32 iqk_bb_reg[10] = { @@ -1780,21 +1770,15 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; - reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; - reg_ec4 = result[i][6]; - reg_ecc = result[i][7]; } if (final_candidate != 0xff) { rtlphy->reg_e94 = reg_e94 
= result[final_candidate][0]; rtlphy->reg_e9c = reg_e9c = result[final_candidate][1]; reg_ea4 = result[final_candidate][2]; - reg_eac = result[final_candidate][3]; rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4]; rtlphy->reg_ebc = reg_ebc = result[final_candidate][5]; - reg_ec4 = result[final_candidate][6]; - reg_ecc = result[final_candidate][7]; patha_ok = pathb_ok = true; } else { rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100; diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c index a313be8c21d2..ac081297db50 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c @@ -244,10 +244,9 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr; u8 *tmp_buf; u8 *praddr; - u8 *psaddr; __le16 fc; u16 type; - bool packet_matchbssid, packet_toself, packet_beacon; + bool packet_matchbssid, packet_toself, packet_beacon = false; tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; @@ -255,7 +254,6 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw, fc = hdr->frame_control; type = WLAN_FC_GET_TYPE(fc); praddr = hdr->addr1; - psaddr = ieee80211_get_SA(hdr); packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && (!compare_ether_addr(mac->bssid, @@ -397,8 +395,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & - IEEE80211_HT_CAP_SUP_WIDTH_20_40; + bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 1535efda3d52..156b52732f3d 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -42,8 +42,12 @@ static void usbctrl_async_callback(struct urb *urb) { - if (urb) - kfree(urb->context); + if (urb) { + /* free dr */ + kfree(urb->setup_packet); + /* free databuf */ + kfree(urb->transfer_buffer); + } } static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, @@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, u8 reqtype; struct usb_ctrlrequest *dr; struct urb *urb; - struct rtl819x_async_write_data { - u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE]; - struct usb_ctrlrequest dr; - } *buf; + const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE; + u8 *databuf; + + if (WARN_ON_ONCE(len > databuf_maxlen)) + len = databuf_maxlen; pipe = usb_sndctrlpipe(udev, 0); /* write_out */ reqtype = REALTEK_USB_VENQT_WRITE; - buf = kmalloc(sizeof(*buf), GFP_ATOMIC); - if (!buf) + dr = kmalloc(sizeof(*dr), GFP_ATOMIC); + if (!dr) return -ENOMEM; + databuf = kmalloc(databuf_maxlen, GFP_ATOMIC); + if (!databuf) { + kfree(dr); + return -ENOMEM; + } + urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { - kfree(buf); + kfree(databuf); + kfree(dr); return -ENOMEM; } - dr = &buf->dr; - dr->bRequestType = reqtype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(len); /* data are already in little-endian order */ - memcpy(buf, pdata, len); + memcpy(databuf, pdata, len); usb_fill_control_urb(urb, udev, pipe, - (unsigned char *)dr, buf, len, - usbctrl_async_callback, buf); + (unsigned char *)dr, databuf, len, + usbctrl_async_callback, NULL); rc = usb_submit_urb(urb, GFP_ATOMIC); - if (rc < 0) - 
kfree(buf); + if (rc < 0) { + kfree(databuf); + kfree(dr); + } usb_free_urb(urb); return rc; } @@ -825,8 +837,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, u32 ep_num; struct urb *_urb = NULL; struct sk_buff *_skb = NULL; - struct sk_buff_head *skb_list; - struct usb_anchor *urb_list; WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { @@ -836,7 +846,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, return; } ep_num = rtlusb->ep_map.ep_mapping[qnum]; - skb_list = &rtlusb->tx_skb_queue[ep_num]; _skb = skb; _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); if (unlikely(!_urb)) { @@ -844,7 +853,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, "Can't allocate urb. Drop skb!\n"); return; } - urb_list = &rtlusb->tx_pending[ep_num]; _rtl_submit_tx_urb(hw, _urb); } @@ -941,7 +949,8 @@ static struct rtl_intf_ops rtl_usb_ops = { }; int rtl_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id) + const struct usb_device_id *id, + struct rtl_hal_cfg *rtl_hal_cfg) { int err; struct ieee80211_hw *hw = NULL; @@ -976,7 +985,7 @@ int rtl_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, hw); /* init cfg & intf_ops */ rtlpriv->rtlhal.interface = INTF_USB; - rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info); + rtlpriv->cfg = rtl_hal_cfg; rtlpriv->intf_ops = &rtl_usb_ops; rtl_dbgp_flag_init(hw); /* Init IO handler */ diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h index 5235136f6dd2..fb986f98d1df 100644 --- a/drivers/net/wireless/rtlwifi/usb.h +++ b/drivers/net/wireless/rtlwifi/usb.h @@ -157,7 +157,8 @@ struct rtl_usb_priv { int rtl_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id); + const struct usb_device_id *id, + struct rtl_hal_cfg *rtl92cu_hal_cfg); void rtl_usb_disconnect(struct usb_interface *intf); int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message); int rtl_usb_resume(struct usb_interface *pusb_intf); diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 21a5f4f4a135..f13258a8d995 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -1702,7 +1702,7 @@ struct rtl_works { struct rtl_debug { u32 dbgp_type[DBGP_TYPE_MAX]; - u32 global_debuglevel; + int global_debuglevel; u64 global_debugcomponents; /* add for proc debug */ diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig index be800119d0a3..cbe1e7fef61b 100644 --- a/drivers/net/wireless/ti/Kconfig +++ b/drivers/net/wireless/ti/Kconfig @@ -12,4 +12,13 @@ source "drivers/net/wireless/ti/wl18xx/Kconfig" # keep last for automatic dependencies source "drivers/net/wireless/ti/wlcore/Kconfig" + +config WILINK_PLATFORM_DATA + bool "TI WiLink platform data" + depends on WLCORE_SDIO || WL1251_SDIO + default y + ---help--- + Small platform data bit needed to pass data to the sdio modules. 
+ + endif # WL_TI diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile index 4d6823983c04..af14231aeede 100644 --- a/drivers/net/wireless/ti/Makefile +++ b/drivers/net/wireless/ti/Makefile @@ -1,5 +1,7 @@ obj-$(CONFIG_WLCORE) += wlcore/ obj-$(CONFIG_WL12XX) += wl12xx/ -obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/ obj-$(CONFIG_WL1251) += wl1251/ obj-$(CONFIG_WL18XX) += wl18xx/ + +# small builtin driver bit +obj-$(CONFIG_WILINK_PLATFORM_DATA) += wilink_platform_data.o diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c index 998e95895f9d..998e95895f9d 100644 --- a/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c +++ b/drivers/net/wireless/ti/wilink_platform_data.c diff --git a/drivers/net/wireless/ti/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig index 1fb65849414f..8fec4ed36ac2 100644 --- a/drivers/net/wireless/ti/wl1251/Kconfig +++ b/drivers/net/wireless/ti/wl1251/Kconfig @@ -1,6 +1,6 @@ menuconfig WL1251 tristate "TI wl1251 driver support" - depends on MAC80211 && EXPERIMENTAL && GENERIC_HARDIRQS + depends on MAC80211 && GENERIC_HARDIRQS select FW_LOADER select CRC7 ---help--- diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index 5ec50a476a69..74ae8e1c2e33 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -29,6 +29,8 @@ static int wl1251_event_scan_complete(struct wl1251 *wl, struct event_mailbox *mbox) { + int ret = 0; + wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d", mbox->scheduled_scan_status, mbox->scheduled_scan_channels); @@ -37,9 +39,11 @@ static int wl1251_event_scan_complete(struct wl1251 *wl, ieee80211_scan_completed(wl->hw, false); wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed"); wl->scanning = false; + if (wl->hw->conf.flags & IEEE80211_CONF_IDLE) + ret = wl1251_ps_set_mode(wl, STATION_IDLE); } - return 0; + return ret; } static void wl1251_event_mbox_dump(struct event_mailbox *mbox) diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index f47e8b0482ad..bbbf68cf50a7 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -623,7 +623,7 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_IDLE) { + if (changed & IEEE80211_CONF_CHANGE_IDLE && !wl->scanning) { if (conf->flags & IEEE80211_CONF_IDLE) { ret = wl1251_ps_set_mode(wl, STATION_IDLE); if (ret < 0) @@ -895,11 +895,21 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw, if (ret < 0) goto out; + if (hw->conf.flags & IEEE80211_CONF_IDLE) { + ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); + if (ret < 0) + goto out_sleep; + ret = wl1251_join(wl, wl->bss_type, wl->channel, + wl->beacon_int, wl->dtim_period); + if (ret < 0) + goto out_sleep; + } + skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len, req->ie_len); if (!skb) { ret = -ENOMEM; - goto out; + goto out_idle; } if (req->ie_len) memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len); @@ -908,11 +918,11 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw, skb->len); dev_kfree_skb(skb); if (ret < 0) - goto out_sleep; + goto out_idle; ret = wl1251_cmd_trigger_scan_to(wl, 0); if (ret < 0) - goto out_sleep; + goto out_idle; wl->scanning = true; @@ -920,9 +930,13 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw, req->n_channels, WL1251_SCAN_NUM_PROBES); if (ret < 0) { wl->scanning = false; - 
goto out_sleep; + goto out_idle; } + goto out_sleep; +out_idle: + if (hw->conf.flags & IEEE80211_CONF_IDLE) + ret = wl1251_ps_set_mode(wl, STATION_IDLE); out_sleep: wl1251_ps_elp_sleep(wl); diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile index da509aa7d009..e6a24056b3c8 100644 --- a/drivers/net/wireless/ti/wl12xx/Makefile +++ b/drivers/net/wireless/ti/wl12xx/Makefile @@ -1,3 +1,3 @@ -wl12xx-objs = main.o cmd.o acx.o debugfs.o +wl12xx-objs = main.o cmd.o acx.o debugfs.o scan.o event.o obj-$(CONFIG_WL12XX) += wl12xx.o diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c index 622206241e83..7dc9f965037d 100644 --- a/drivers/net/wireless/ti/wl12xx/cmd.c +++ b/drivers/net/wireless/ti/wl12xx/cmd.c @@ -284,3 +284,40 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl) kfree(radio_parms); return ret; } + +int wl12xx_cmd_channel_switch(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_channel_switch *ch_switch) +{ + struct wl12xx_cmd_channel_switch *cmd; + int ret; + + wl1271_debug(DEBUG_ACX, "cmd channel switch"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->role_id = wlvif->role_id; + cmd->channel = ch_switch->channel->hw_value; + cmd->switch_time = ch_switch->count; + cmd->stop_tx = ch_switch->block_tx; + + /* FIXME: control from mac80211 in the future */ + /* Enable TX on the target channel */ + cmd->post_switch_tx_disable = 0; + + ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send channel switch command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} diff --git a/drivers/net/wireless/ti/wl12xx/cmd.h b/drivers/net/wireless/ti/wl12xx/cmd.h index 140a0e8829d5..32cbad54e993 100644 --- a/drivers/net/wireless/ti/wl12xx/cmd.h +++ b/drivers/net/wireless/ti/wl12xx/cmd.h @@ -103,10 +103,30 @@ struct wl1271_ext_radio_parms_cmd { u8 padding[3]; } __packed; +struct wl12xx_cmd_channel_switch { + struct wl1271_cmd_header header; + + u8 role_id; + + /* The new serving channel */ + u8 channel; + /* Relative time of the serving channel switch in TBTT units */ + u8 switch_time; + /* Stop the role TX, should expect it after radar detection */ + u8 stop_tx; + /* The target channel tx status 1-stopped 0-open*/ + u8 post_switch_tx_disable; + + u8 padding[3]; +} __packed; + int wl1271_cmd_general_parms(struct wl1271 *wl); int wl128x_cmd_general_parms(struct wl1271 *wl); int wl1271_cmd_radio_parms(struct wl1271 *wl); int wl128x_cmd_radio_parms(struct wl1271 *wl); int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); +int wl12xx_cmd_channel_switch(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_channel_switch *ch_switch); #endif /* __WL12XX_CMD_H__ */ diff --git a/drivers/net/wireless/ti/wl12xx/event.c b/drivers/net/wireless/ti/wl12xx/event.c new file mode 100644 index 000000000000..6ac0ed751da8 --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/event.c @@ -0,0 +1,116 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "event.h" +#include "scan.h" +#include "../wlcore/cmd.h" +#include "../wlcore/debug.h" + +int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, + bool *timeout) +{ + u32 local_event; + + switch (event) { + case WLCORE_EVENT_ROLE_STOP_COMPLETE: + local_event = ROLE_STOP_COMPLETE_EVENT_ID; + break; + + case WLCORE_EVENT_PEER_REMOVE_COMPLETE: + local_event = PEER_REMOVE_COMPLETE_EVENT_ID; + break; + + default: + /* event not implemented */ + return 0; + } + return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout); +} + +int wl12xx_process_mailbox_events(struct wl1271 *wl) +{ + struct wl12xx_event_mailbox *mbox = wl->mbox; + u32 vector; + + + vector = le32_to_cpu(mbox->events_vector); + vector &= ~(le32_to_cpu(mbox->events_mask)); + + wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector); + + if (vector & SCAN_COMPLETE_EVENT_ID) { + wl1271_debug(DEBUG_EVENT, "status: 0x%x", + mbox->scheduled_scan_status); + + if (wl->scan_wlvif) + wl12xx_scan_completed(wl, wl->scan_wlvif); + } + + if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { + wl1271_debug(DEBUG_EVENT, + "PERIODIC_SCAN_REPORT_EVENT (status 0x%0x)", + mbox->scheduled_scan_status); + + wlcore_scan_sched_scan_results(wl); + } + + if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID) + wlcore_event_sched_scan_completed(wl, + mbox->scheduled_scan_status); + if (vector & SOFT_GEMINI_SENSE_EVENT_ID) + wlcore_event_soft_gemini_sense(wl, + mbox->soft_gemini_sense_info); + + if (vector & BSS_LOSE_EVENT_ID) + wlcore_event_beacon_loss(wl, 0xff); + + if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) + wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric); + + if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) + wlcore_event_ba_rx_constraint(wl, + BIT(mbox->role_id), + mbox->rx_ba_allowed); + + if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) + wlcore_event_channel_switch(wl, 0xff, + mbox->channel_switch_status); + + if (vector & DUMMY_PACKET_EVENT_ID) + wlcore_event_dummy_packet(wl); + + /* + * "TX retries exceeded" has a different meaning according to mode. + * In AP mode the offending station is disconnected. + */ + if (vector & MAX_TX_RETRY_EVENT_ID) + wlcore_event_max_tx_failure(wl, + le16_to_cpu(mbox->sta_tx_retry_exceeded)); + + if (vector & INACTIVE_STA_EVENT_ID) + wlcore_event_inactive_sta(wl, + le16_to_cpu(mbox->sta_aging_status)); + + if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID) + wlcore_event_roc_complete(wl); + + return 0; +} diff --git a/drivers/net/wireless/ti/wl12xx/event.h b/drivers/net/wireless/ti/wl12xx/event.h new file mode 100644 index 000000000000..a5cc3fcd9eea --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/event.h @@ -0,0 +1,111 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL12XX_EVENT_H__ +#define __WL12XX_EVENT_H__ + +#include "../wlcore/wlcore.h" + +enum { + MEASUREMENT_START_EVENT_ID = BIT(8), + MEASUREMENT_COMPLETE_EVENT_ID = BIT(9), + SCAN_COMPLETE_EVENT_ID = BIT(10), + WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11), + AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12), + RESERVED1 = BIT(13), + PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14), + ROLE_STOP_COMPLETE_EVENT_ID = BIT(15), + RADAR_DETECTED_EVENT_ID = BIT(16), + CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17), + BSS_LOSE_EVENT_ID = BIT(18), + REGAINED_BSS_EVENT_ID = BIT(19), + MAX_TX_RETRY_EVENT_ID = BIT(20), + DUMMY_PACKET_EVENT_ID = BIT(21), + SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), + CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23), + SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), + PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25), + INACTIVE_STA_EVENT_ID = BIT(26), + PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27), + PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28), + PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29), + BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30), + REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31), +}; + +struct wl12xx_event_mailbox { + __le32 events_vector; + __le32 events_mask; + __le32 reserved_1; + __le32 reserved_2; + + u8 number_of_scan_results; + u8 scan_tag; + u8 completed_scan_status; + u8 reserved_3; + + u8 soft_gemini_sense_info; + u8 soft_gemini_protective_info; + s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; + u8 change_auto_mode_timeout; + u8 scheduled_scan_status; + u8 reserved4; + /* tuned channel (roc) */ + u8 roc_channel; + + __le16 hlid_removed_bitmap; + + /* bitmap of aged stations (by HLID) */ + __le16 sta_aging_status; + + /* bitmap of stations (by HLID) which exceeded max tx retries */ + __le16 sta_tx_retry_exceeded; + + /* discovery completed results */ + u8 discovery_tag; + u8 number_of_preq_results; + u8 number_of_prsp_results; + u8 reserved_5; + + /* rx ba constraint */ + u8 role_id; /* 0xFF means any role. 
*/ + u8 rx_ba_allowed; + u8 reserved_6[2]; + + /* Channel switch results */ + + u8 channel_switch_role_id; + u8 channel_switch_status; + u8 reserved_7[2]; + + u8 ps_poll_delivery_failure_role_ids; + u8 stopped_role_ids; + u8 started_role_ids; + + u8 reserved_8[9]; +} __packed; + +int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, + bool *timeout); +int wl12xx_process_mailbox_events(struct wl1271 *wl); + +#endif + diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index e5f5f8f39144..09694e39bb14 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -38,6 +38,8 @@ #include "reg.h" #include "cmd.h" #include "acx.h" +#include "scan.h" +#include "event.h" #include "debugfs.h" static char *fref_param; @@ -208,6 +210,8 @@ static struct wlcore_conf wl12xx_conf = { .tmpl_short_retry_limit = 10, .tmpl_long_retry_limit = 10, .tx_watchdog_timeout = 5000, + .slow_link_thold = 3, + .fast_link_thold = 10, }, .conn = { .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, @@ -265,8 +269,10 @@ static struct wlcore_conf wl12xx_conf = { .scan = { .min_dwell_time_active = 7500, .max_dwell_time_active = 30000, - .min_dwell_time_passive = 100000, - .max_dwell_time_passive = 100000, + .min_dwell_time_active_long = 25000, + .max_dwell_time_active_long = 50000, + .dwell_time_passive = 100000, + .dwell_time_dfs = 150000, .num_probe_reqs = 2, .split_scan_timeout = 50000, }, @@ -368,6 +374,10 @@ static struct wlcore_conf wl12xx_conf = { .increase_time = 1, .window_size = 16, }, + .recovery = { + .bug_on_recovery = 0, + .no_recovery = 0, + }, }; static struct wl12xx_priv_conf wl12xx_default_priv_conf = { @@ -601,9 +611,9 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) { int ret; - if (wl->chip.id != CHIP_ID_1283_PG20) { + if (wl->chip.id != CHIP_ID_128X_PG20) { struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; - struct wl127x_rx_mem_pool_addr rx_mem_addr; + struct wl12xx_priv *priv = wl->priv; /* * Choose the block we want to read @@ -612,13 +622,13 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) */ u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK; - rx_mem_addr.addr = (mem_block << 8) + + priv->rx_mem_addr->addr = (mem_block << 8) + le32_to_cpu(wl_mem_map->packet_memory_pool_start); - rx_mem_addr.addr_extra = rx_mem_addr.addr + 4; + priv->rx_mem_addr->addr_extra = priv->rx_mem_addr->addr + 4; - ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr, - sizeof(rx_mem_addr), false); + ret = wlcore_write(wl, WL1271_SLV_REG_DATA, priv->rx_mem_addr, + sizeof(*priv->rx_mem_addr), false); if (ret < 0) return ret; } @@ -631,13 +641,15 @@ static int wl12xx_identify_chip(struct wl1271 *wl) int ret = 0; switch (wl->chip.id) { - case CHIP_ID_1271_PG10: + case CHIP_ID_127X_PG10: wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", wl->chip.id); wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | WLCORE_QUIRK_DUAL_PROBE_TMPL | - WLCORE_QUIRK_TKIP_HEADER_SPACE; + WLCORE_QUIRK_TKIP_HEADER_SPACE | + WLCORE_QUIRK_START_STA_FAILS | + WLCORE_QUIRK_AP_ZERO_SESSION_ID; wl->sr_fw_name = WL127X_FW_NAME_SINGLE; wl->mr_fw_name = WL127X_FW_NAME_MULTI; memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x, @@ -646,18 +658,22 @@ static int wl12xx_identify_chip(struct wl1271 *wl) /* read data preparation is only needed by wl127x */ wl->ops->prepare_read = wl127x_prepare_read; - wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER, - WL127X_MAJOR_VER, WL127X_SUBTYPE_VER, - 
WL127X_MINOR_VER); + wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, + WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER, + WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER, + WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER, + WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER); break; - case CHIP_ID_1271_PG20: + case CHIP_ID_127X_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", wl->chip.id); wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | WLCORE_QUIRK_DUAL_PROBE_TMPL | - WLCORE_QUIRK_TKIP_HEADER_SPACE; + WLCORE_QUIRK_TKIP_HEADER_SPACE | + WLCORE_QUIRK_START_STA_FAILS | + WLCORE_QUIRK_AP_ZERO_SESSION_ID; wl->plt_fw_name = WL127X_PLT_FW_NAME; wl->sr_fw_name = WL127X_FW_NAME_SINGLE; wl->mr_fw_name = WL127X_FW_NAME_MULTI; @@ -667,12 +683,14 @@ static int wl12xx_identify_chip(struct wl1271 *wl) /* read data preparation is only needed by wl127x */ wl->ops->prepare_read = wl127x_prepare_read; - wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER, - WL127X_MAJOR_VER, WL127X_SUBTYPE_VER, - WL127X_MINOR_VER); + wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, + WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER, + WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER, + WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER, + WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER); break; - case CHIP_ID_1283_PG20: + case CHIP_ID_128X_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)", wl->chip.id); wl->plt_fw_name = WL128X_PLT_FW_NAME; @@ -682,19 +700,29 @@ static int wl12xx_identify_chip(struct wl1271 *wl) /* wl128x requires TX blocksize alignment */ wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN | WLCORE_QUIRK_DUAL_PROBE_TMPL | - WLCORE_QUIRK_TKIP_HEADER_SPACE; - - wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, WL128X_IFTYPE_VER, - WL128X_MAJOR_VER, WL128X_SUBTYPE_VER, - WL128X_MINOR_VER); + WLCORE_QUIRK_TKIP_HEADER_SPACE | + WLCORE_QUIRK_START_STA_FAILS | + WLCORE_QUIRK_AP_ZERO_SESSION_ID; + + wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, + WL128X_IFTYPE_SR_VER, WL128X_MAJOR_SR_VER, + WL128X_SUBTYPE_SR_VER, WL128X_MINOR_SR_VER, + WL128X_IFTYPE_MR_VER, WL128X_MAJOR_MR_VER, + WL128X_SUBTYPE_MR_VER, WL128X_MINOR_MR_VER); break; - case CHIP_ID_1283_PG10: + case CHIP_ID_128X_PG10: default: wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); ret = -ENODEV; goto out; } + /* common settings */ + wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY; + wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY; + wl->sched_scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4; + wl->sched_scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5; + wl->max_channels_5 = WL12XX_MAX_CHANNELS_5GHZ; out: return ret; } @@ -1067,7 +1095,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl) u32 clk; int selected_clock = -1; - if (wl->chip.id == CHIP_ID_1283_PG20) { + if (wl->chip.id == CHIP_ID_128X_PG20) { ret = wl128x_boot_clk(wl, &selected_clock); if (ret < 0) goto out; @@ -1098,7 +1126,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl) wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); - if (wl->chip.id == CHIP_ID_1283_PG20) + if (wl->chip.id == CHIP_ID_128X_PG20) clk |= ((selected_clock & 0x3) << 1) << 4; else clk |= (priv->ref_clock << 1) << 4; @@ -1152,7 +1180,7 @@ static int wl12xx_pre_upload(struct wl1271 *wl) /* WL1271: The reference driver skips steps 7 to 10 (jumps directly * to upload_fw) */ - if (wl->chip.id == CHIP_ID_1283_PG20) { + if (wl->chip.id == CHIP_ID_128X_PG20) { ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA); if (ret < 0) goto out; @@ -1219,6 +1247,23 @@ static int wl12xx_boot(struct wl1271 *wl) if (ret < 0) goto out; + wl->event_mask = BSS_LOSE_EVENT_ID | + 
REGAINED_BSS_EVENT_ID | + SCAN_COMPLETE_EVENT_ID | + ROLE_STOP_COMPLETE_EVENT_ID | + RSSI_SNR_TRIGGER_0_EVENT_ID | + PSPOLL_DELIVERY_FAILURE_EVENT_ID | + SOFT_GEMINI_SENSE_EVENT_ID | + PERIODIC_SCAN_REPORT_EVENT_ID | + PERIODIC_SCAN_COMPLETE_EVENT_ID | + DUMMY_PACKET_EVENT_ID | + PEER_REMOVE_COMPLETE_EVENT_ID | + BA_SESSION_RX_CONSTRAINT_EVENT_ID | + REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID | + INACTIVE_STA_EVENT_ID | + MAX_TX_RETRY_EVENT_ID | + CHANNEL_SWITCH_COMPLETE_EVENT_ID; + ret = wlcore_boot_run_firmware(wl); if (ret < 0) goto out; @@ -1261,7 +1306,7 @@ static void wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, u32 blks, u32 spare_blks) { - if (wl->chip.id == CHIP_ID_1283_PG20) { + if (wl->chip.id == CHIP_ID_128X_PG20) { desc->wl128x_mem.total_mem_blocks = blks; } else { desc->wl127x_mem.extra_blocks = spare_blks; @@ -1275,7 +1320,7 @@ wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, { u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len); - if (wl->chip.id == CHIP_ID_1283_PG20) { + if (wl->chip.id == CHIP_ID_128X_PG20) { desc->wl128x_mem.extra_bytes = aligned_len - skb->len; desc->length = cpu_to_le16(aligned_len >> 2); @@ -1339,7 +1384,7 @@ static int wl12xx_hw_init(struct wl1271 *wl) { int ret; - if (wl->chip.id == CHIP_ID_1283_PG20) { + if (wl->chip.id == CHIP_ID_128X_PG20) { u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE; ret = wl128x_cmd_general_parms(wl); @@ -1394,22 +1439,6 @@ static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl, return wlvif->rate_set; } -static int wl12xx_identify_fw(struct wl1271 *wl) -{ - unsigned int *fw_ver = wl->chip.fw_ver; - - /* Only new station firmwares support routing fw logs to the host */ - if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) && - (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN)) - wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED; - - /* This feature is not yet supported for AP mode */ - if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) - wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED; - - return 0; -} - static void wl12xx_conf_init(struct wl1271 *wl) { struct wl12xx_priv *priv = wl->priv; @@ -1426,7 +1455,7 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl) bool supported = false; u8 major, minor; - if (wl->chip.id == CHIP_ID_1283_PG20) { + if (wl->chip.id == CHIP_ID_128X_PG20) { major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver); minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver); @@ -1482,7 +1511,7 @@ static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver) u16 die_info; int ret; - if (wl->chip.id == CHIP_ID_1283_PG20) + if (wl->chip.id == CHIP_ID_128X_PG20) ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1, &die_info); else @@ -1589,16 +1618,46 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd, return wlcore_set_key(wl, cmd, vif, sta, key_conf); } +static int wl12xx_set_peer_cap(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid) +{ + return wl1271_acx_set_ht_capabilities(wl, ht_cap, allow_ht_operation, + hlid); +} + +static bool wl12xx_lnk_high_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + u8 thold; + + if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map)) + thold = wl->conf.tx.fast_link_thold; + else + thold = wl->conf.tx.slow_link_thold; + + return lnk->allocated_pkts < thold; +} + +static bool wl12xx_lnk_low_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + /* any link is good for low priority */ + return true; +} + static int 
wl12xx_setup(struct wl1271 *wl); static struct wlcore_ops wl12xx_ops = { .setup = wl12xx_setup, .identify_chip = wl12xx_identify_chip, - .identify_fw = wl12xx_identify_fw, .boot = wl12xx_boot, .plt_init = wl12xx_plt_init, .trigger_cmd = wl12xx_trigger_cmd, .ack_event = wl12xx_ack_event, + .wait_for_event = wl12xx_wait_for_event, + .process_mailbox_events = wl12xx_process_mailbox_events, .calc_tx_blocks = wl12xx_calc_tx_blocks, .set_tx_desc_blocks = wl12xx_set_tx_desc_blocks, .set_tx_desc_data_len = wl12xx_set_tx_desc_data_len, @@ -1615,9 +1674,17 @@ static struct wlcore_ops wl12xx_ops = { .set_rx_csum = NULL, .ap_get_mimo_wide_rate_mask = NULL, .debugfs_init = wl12xx_debugfs_add_files, + .scan_start = wl12xx_scan_start, + .scan_stop = wl12xx_scan_stop, + .sched_scan_start = wl12xx_sched_scan_start, + .sched_scan_stop = wl12xx_scan_sched_scan_stop, .get_spare_blocks = wl12xx_get_spare_blocks, .set_key = wl12xx_set_key, + .channel_switch = wl12xx_cmd_channel_switch, .pre_pkt_send = NULL, + .set_peer_cap = wl12xx_set_peer_cap, + .lnk_high_prio = wl12xx_lnk_high_prio, + .lnk_low_prio = wl12xx_lnk_low_prio, }; static struct ieee80211_sta_ht_cap wl12xx_ht_cap = { @@ -1636,11 +1703,13 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = { static int wl12xx_setup(struct wl1271 *wl) { struct wl12xx_priv *priv = wl->priv; - struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data; + struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data; + struct wl12xx_platform_data *pdata = pdev_data->pdata; wl->rtable = wl12xx_rtable; wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS; wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS; + wl->num_channels = 1; wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES; wl->band_rate_to_idx = wl12xx_band_rate_to_idx; wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX; @@ -1693,6 +1762,10 @@ static int wl12xx_setup(struct wl1271 *wl) wl1271_error("Invalid tcxo parameter %s", tcxo_param); } + priv->rx_mem_addr = kmalloc(sizeof(*priv->rx_mem_addr), GFP_KERNEL); + if (!priv->rx_mem_addr) + return -ENOMEM; + return 0; } @@ -1703,7 +1776,8 @@ static int wl12xx_probe(struct platform_device *pdev) int ret; hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv), - WL12XX_AGGR_BUFFER_SIZE); + WL12XX_AGGR_BUFFER_SIZE, + sizeof(struct wl12xx_event_mailbox)); if (IS_ERR(hw)) { wl1271_error("can't allocate hw"); ret = PTR_ERR(hw); @@ -1725,6 +1799,21 @@ out: return ret; } +static int wl12xx_remove(struct platform_device *pdev) +{ + struct wl1271 *wl = platform_get_drvdata(pdev); + struct wl12xx_priv *priv; + + if (!wl) + goto out; + priv = wl->priv; + + kfree(priv->rx_mem_addr); + +out: + return wlcore_remove(pdev); +} + static const struct platform_device_id wl12xx_id_table[] = { { "wl12xx", 0 }, { } /* Terminating Entry */ @@ -1733,7 +1822,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table); static struct platform_driver wl12xx_driver = { .probe = wl12xx_probe, - .remove = wlcore_remove, + .remove = wl12xx_remove, .id_table = wl12xx_id_table, .driver = { .name = "wl12xx_driver", diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c new file mode 100644 index 000000000000..affdb3ec6225 --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/scan.c @@ -0,0 +1,501 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/ieee80211.h> +#include "scan.h" +#include "../wlcore/debug.h" +#include "../wlcore/tx.h" + +static int wl1271_get_scan_channels(struct wl1271 *wl, + struct cfg80211_scan_request *req, + struct basic_scan_channel_params *channels, + enum ieee80211_band band, bool passive) +{ + struct conf_scan_settings *c = &wl->conf.scan; + int i, j; + u32 flags; + + for (i = 0, j = 0; + i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS; + i++) { + flags = req->channels[i]->flags; + + if (!test_bit(i, wl->scan.scanned_ch) && + !(flags & IEEE80211_CHAN_DISABLED) && + (req->channels[i]->band == band) && + /* + * In passive scans, we scan all remaining + * channels, even if not marked as such. + * In active scans, we only scan channels not + * marked as passive. + */ + (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) { + wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", + req->channels[i]->band, + req->channels[i]->center_freq); + wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X", + req->channels[i]->hw_value, + req->channels[i]->flags); + wl1271_debug(DEBUG_SCAN, + "max_antenna_gain %d, max_power %d", + req->channels[i]->max_antenna_gain, + req->channels[i]->max_power); + wl1271_debug(DEBUG_SCAN, "beacon_found %d", + req->channels[i]->beacon_found); + + if (!passive) { + channels[j].min_duration = + cpu_to_le32(c->min_dwell_time_active); + channels[j].max_duration = + cpu_to_le32(c->max_dwell_time_active); + } else { + channels[j].min_duration = + cpu_to_le32(c->dwell_time_passive); + channels[j].max_duration = + cpu_to_le32(c->dwell_time_passive); + } + channels[j].early_termination = 0; + channels[j].tx_power_att = req->channels[i]->max_power; + channels[j].channel = req->channels[i]->hw_value; + + memset(&channels[j].bssid_lsb, 0xff, 4); + memset(&channels[j].bssid_msb, 0xff, 2); + + /* Mark the channels we already used */ + set_bit(i, wl->scan.scanned_ch); + + j++; + } + } + + return j; +} + +#define WL1271_NOTHING_TO_SCAN 1 + +static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum ieee80211_band band, + bool passive, u32 basic_rate) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct wl1271_cmd_scan *cmd; + struct wl1271_cmd_trigger_scan_to *trigger; + int ret; + u16 scan_options = 0; + + /* skip active scans if we don't have SSIDs */ + if (!passive && wl->scan.req->n_ssids == 0) + return WL1271_NOTHING_TO_SCAN; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); + if (!cmd || !trigger) { + ret = -ENOMEM; + goto out; + } + + if (wl->conf.scan.split_scan_timeout) + scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN; + + if (passive) + scan_options |= WL1271_SCAN_OPT_PASSIVE; + + cmd->params.role_id = wlvif->role_id; + + if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) { + ret = -EINVAL; + goto out; + } + + 
cmd->params.scan_options = cpu_to_le16(scan_options); + + cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, + cmd->channels, + band, passive); + if (cmd->params.n_ch == 0) { + ret = WL1271_NOTHING_TO_SCAN; + goto out; + } + + cmd->params.tx_rate = cpu_to_le32(basic_rate); + cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs; + cmd->params.tid_trigger = CONF_TX_AC_ANY_TID; + cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; + + if (band == IEEE80211_BAND_2GHZ) + cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ; + else + cmd->params.band = WL1271_SCAN_BAND_5_GHZ; + + if (wl->scan.ssid_len && wl->scan.ssid) { + cmd->params.ssid_len = wl->scan.ssid_len; + memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); + } + + memcpy(cmd->addr, vif->addr, ETH_ALEN); + + ret = wl12xx_cmd_build_probe_req(wl, wlvif, + cmd->params.role_id, band, + wl->scan.ssid, wl->scan.ssid_len, + wl->scan.req->ie, + wl->scan.req->ie_len, false); + if (ret < 0) { + wl1271_error("PROBE request template failed"); + goto out; + } + + trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout); + ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, + sizeof(*trigger), 0); + if (ret < 0) { + wl1271_error("trigger scan to failed for hw scan"); + goto out; + } + + wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("SCAN failed"); + goto out; + } + +out: + kfree(cmd); + kfree(trigger); + return ret; +} + +int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl1271_cmd_header *cmd = NULL; + int ret = 0; + + if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE)) + return -EINVAL; + + wl1271_debug(DEBUG_CMD, "cmd scan stop"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd, + sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("cmd stop_scan failed"); + goto out; + } +out: + kfree(cmd); + return ret; +} + +void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret = 0; + enum ieee80211_band band; + u32 rate, mask; + + switch (wl->scan.state) { + case WL1271_SCAN_STATE_IDLE: + break; + + case WL1271_SCAN_STATE_2GHZ_ACTIVE: + band = IEEE80211_BAND_2GHZ; + mask = wlvif->bitrate_masks[band]; + if (wl->scan.req->no_cck) { + mask &= ~CONF_TX_CCK_RATES; + if (!mask) + mask = CONF_TX_RATE_MASK_BASIC_P2P; + } + rate = wl1271_tx_min_rate_get(wl, mask); + ret = wl1271_scan_send(wl, wlvif, band, false, rate); + if (ret == WL1271_NOTHING_TO_SCAN) { + wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE; + wl1271_scan_stm(wl, wlvif); + } + + break; + + case WL1271_SCAN_STATE_2GHZ_PASSIVE: + band = IEEE80211_BAND_2GHZ; + mask = wlvif->bitrate_masks[band]; + if (wl->scan.req->no_cck) { + mask &= ~CONF_TX_CCK_RATES; + if (!mask) + mask = CONF_TX_RATE_MASK_BASIC_P2P; + } + rate = wl1271_tx_min_rate_get(wl, mask); + ret = wl1271_scan_send(wl, wlvif, band, true, rate); + if (ret == WL1271_NOTHING_TO_SCAN) { + if (wl->enable_11a) + wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; + else + wl->scan.state = WL1271_SCAN_STATE_DONE; + wl1271_scan_stm(wl, wlvif); + } + + break; + + case WL1271_SCAN_STATE_5GHZ_ACTIVE: + band = IEEE80211_BAND_5GHZ; + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); + ret = wl1271_scan_send(wl, wlvif, band, false, rate); + if (ret == WL1271_NOTHING_TO_SCAN) { + wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE; + wl1271_scan_stm(wl, wlvif); + } + + break; + + case 
WL1271_SCAN_STATE_5GHZ_PASSIVE: + band = IEEE80211_BAND_5GHZ; + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); + ret = wl1271_scan_send(wl, wlvif, band, true, rate); + if (ret == WL1271_NOTHING_TO_SCAN) { + wl->scan.state = WL1271_SCAN_STATE_DONE; + wl1271_scan_stm(wl, wlvif); + } + + break; + + case WL1271_SCAN_STATE_DONE: + wl->scan.failed = false; + cancel_delayed_work(&wl->scan_complete_work); + ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, + msecs_to_jiffies(0)); + break; + + default: + wl1271_error("invalid scan state"); + break; + } + + if (ret < 0) { + cancel_delayed_work(&wl->scan_complete_work); + ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, + msecs_to_jiffies(0)); + } +} + +static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd, + struct wlcore_scan_channels *cmd_channels) +{ + memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive)); + memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active)); + cmd->dfs = cmd_channels->dfs; + cmd->n_pactive_ch = cmd_channels->passive_active; + + memcpy(cmd->channels_2, cmd_channels->channels_2, + sizeof(cmd->channels_2)); + memcpy(cmd->channels_5, cmd_channels->channels_5, + sizeof(cmd->channels_2)); + /* channels_4 are not supported, so no need to copy them */ +} + +int wl1271_scan_sched_scan_config(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies) +{ + struct wl1271_cmd_sched_scan_config *cfg = NULL; + struct wlcore_scan_channels *cfg_channels = NULL; + struct conf_sched_scan_settings *c = &wl->conf.sched_scan; + int i, ret; + bool force_passive = !req->n_ssids; + + wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return -ENOMEM; + + cfg->role_id = wlvif->role_id; + cfg->rssi_threshold = c->rssi_threshold; + cfg->snr_threshold = c->snr_threshold; + cfg->n_probe_reqs = c->num_probe_reqs; + /* cycles set to 0 it means infinite (until manually stopped) */ + cfg->cycles = 0; + /* report APs when at least 1 is found */ + cfg->report_after = 1; + /* don't stop scanning automatically when something is found */ + cfg->terminate = 0; + cfg->tag = WL1271_SCAN_DEFAULT_TAG; + /* don't filter on BSS type */ + cfg->bss_type = SCAN_BSS_TYPE_ANY; + /* currently NL80211 supports only a single interval */ + for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++) + cfg->intervals[i] = cpu_to_le32(req->interval); + + cfg->ssid_len = 0; + ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req); + if (ret < 0) + goto out; + + cfg->filter_type = ret; + + wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type); + + cfg_channels = kzalloc(sizeof(*cfg_channels), GFP_KERNEL); + if (!cfg_channels) { + ret = -ENOMEM; + goto out; + } + + if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels, + req->n_channels, req->n_ssids, + SCAN_TYPE_PERIODIC)) { + wl1271_error("scan channel list is empty"); + ret = -EINVAL; + goto out; + } + wl12xx_adjust_channels(cfg, cfg_channels); + + if (!force_passive && cfg->active[0]) { + u8 band = IEEE80211_BAND_2GHZ; + ret = wl12xx_cmd_build_probe_req(wl, wlvif, + wlvif->role_id, band, + req->ssids[0].ssid, + req->ssids[0].ssid_len, + ies->ie[band], + ies->len[band], true); + if (ret < 0) { + wl1271_error("2.4GHz PROBE request template failed"); + goto out; + } + } + + if (!force_passive && cfg->active[1]) { + u8 band = IEEE80211_BAND_5GHZ; + ret = wl12xx_cmd_build_probe_req(wl, wlvif, + wlvif->role_id, band, 
+ req->ssids[0].ssid, + req->ssids[0].ssid_len, + ies->ie[band], + ies->len[band], true); + if (ret < 0) { + wl1271_error("5GHz PROBE request template failed"); + goto out; + } + } + + wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg)); + + ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg, + sizeof(*cfg), 0); + if (ret < 0) { + wl1271_error("SCAN configuration failed"); + goto out; + } +out: + kfree(cfg_channels); + kfree(cfg); + return ret; +} + +int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl1271_cmd_sched_scan_start *start; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd periodic scan start"); + + if (wlvif->bss_type != BSS_TYPE_STA_BSS) + return -EOPNOTSUPP; + + if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + return -EBUSY; + + start = kzalloc(sizeof(*start), GFP_KERNEL); + if (!start) + return -ENOMEM; + + start->role_id = wlvif->role_id; + start->tag = WL1271_SCAN_DEFAULT_TAG; + + ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, + sizeof(*start), 0); + if (ret < 0) { + wl1271_error("failed to send scan start command"); + goto out_free; + } + +out_free: + kfree(start); + return ret; +} + +int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies) +{ + int ret; + + ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies); + if (ret < 0) + return ret; + + return wl1271_scan_sched_scan_start(wl, wlvif); +} + +void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl1271_cmd_sched_scan_stop *stop; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd periodic scan stop"); + + /* FIXME: what to do if alloc'ing to stop fails? */ + stop = kzalloc(sizeof(*stop), GFP_KERNEL); + if (!stop) { + wl1271_error("failed to alloc memory to send sched scan stop"); + return; + } + + stop->role_id = wlvif->role_id; + stop->tag = WL1271_SCAN_DEFAULT_TAG; + + ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop, + sizeof(*stop), 0); + if (ret < 0) { + wl1271_error("failed to send sched scan stop command"); + goto out_free; + } + +out_free: + kfree(stop); +} + +int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_scan_request *req) +{ + wl1271_scan_stm(wl, wlvif); + return 0; +} + +void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + wl1271_scan_stm(wl, wlvif); +} diff --git a/drivers/net/wireless/ti/wl12xx/scan.h b/drivers/net/wireless/ti/wl12xx/scan.h new file mode 100644 index 000000000000..264af7ac2785 --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/scan.h @@ -0,0 +1,140 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL12XX_SCAN_H__ +#define __WL12XX_SCAN_H__ + +#include "../wlcore/wlcore.h" +#include "../wlcore/cmd.h" +#include "../wlcore/scan.h" + +#define WL12XX_MAX_CHANNELS_5GHZ 23 + +struct basic_scan_params { + /* Scan option flags (WL1271_SCAN_OPT_*) */ + __le16 scan_options; + u8 role_id; + /* Number of scan channels in the list (maximum 30) */ + u8 n_ch; + /* This field indicates the number of probe requests to send + per channel for an active scan */ + u8 n_probe_reqs; + u8 tid_trigger; + u8 ssid_len; + u8 use_ssid_list; + + /* Rate bit field for sending the probes */ + __le32 tx_rate; + + u8 ssid[IEEE80211_MAX_SSID_LEN]; + /* Band to scan */ + u8 band; + + u8 scan_tag; + u8 padding2[2]; +} __packed; + +struct basic_scan_channel_params { + /* Duration in TU to wait for frames on a channel for active scan */ + __le32 min_duration; + __le32 max_duration; + __le32 bssid_lsb; + __le16 bssid_msb; + u8 early_termination; + u8 tx_power_att; + u8 channel; + /* FW internal use only! */ + u8 dfs_candidate; + u8 activity_detected; + u8 pad; +} __packed; + +struct wl1271_cmd_scan { + struct wl1271_cmd_header header; + + struct basic_scan_params params; + struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; + + /* src mac address */ + u8 addr[ETH_ALEN]; + u8 padding[2]; +} __packed; + +struct wl1271_cmd_sched_scan_config { + struct wl1271_cmd_header header; + + __le32 intervals[SCAN_MAX_CYCLE_INTERVALS]; + + s8 rssi_threshold; /* for filtering (in dBm) */ + s8 snr_threshold; /* for filtering (in dB) */ + + u8 cycles; /* maximum number of scan cycles */ + u8 report_after; /* report when this number of results are received */ + u8 terminate; /* stop scanning after reporting */ + + u8 tag; + u8 bss_type; /* for filtering */ + u8 filter_type; + + u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */ + u8 ssid[IEEE80211_MAX_SSID_LEN]; + + u8 n_probe_reqs; /* Number of probes requests per channel */ + + u8 passive[SCAN_MAX_BANDS]; + u8 active[SCAN_MAX_BANDS]; + + u8 dfs; + + u8 n_pactive_ch; /* number of pactive (passive until fw detects energy) + channels in BG band */ + u8 role_id; + u8 padding[1]; + struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; + struct conn_scan_ch_params channels_5[WL12XX_MAX_CHANNELS_5GHZ]; + struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ]; +} __packed; + +struct wl1271_cmd_sched_scan_start { + struct wl1271_cmd_header header; + + u8 tag; + u8 role_id; + u8 padding[2]; +} __packed; + +struct wl1271_cmd_sched_scan_stop { + struct wl1271_cmd_header header; + + u8 tag; + u8 role_id; + u8 padding[2]; +} __packed; + +int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_scan_request *req); +int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif); +void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies); +void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif); +#endif diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h index 7182bbf6625d..d4552857480c 100644 --- a/drivers/net/wireless/ti/wl12xx/wl12xx.h +++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h @@ 
-24,19 +24,37 @@ #include "conf.h" -/* minimum FW required for driver for wl127x */ +/* WiLink 6/7 chip IDs */ +#define CHIP_ID_127X_PG10 (0x04030101) +#define CHIP_ID_127X_PG20 (0x04030111) +#define CHIP_ID_128X_PG10 (0x05030101) +#define CHIP_ID_128X_PG20 (0x05030111) + +/* FW chip version for wl127x */ #define WL127X_CHIP_VER 6 -#define WL127X_IFTYPE_VER 3 -#define WL127X_MAJOR_VER 10 -#define WL127X_SUBTYPE_VER 2 -#define WL127X_MINOR_VER 115 +/* minimum single-role FW version for wl127x */ +#define WL127X_IFTYPE_SR_VER 3 +#define WL127X_MAJOR_SR_VER 10 +#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE +#define WL127X_MINOR_SR_VER 115 +/* minimum multi-role FW version for wl127x */ +#define WL127X_IFTYPE_MR_VER 5 +#define WL127X_MAJOR_MR_VER 7 +#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE +#define WL127X_MINOR_MR_VER 115 -/* minimum FW required for driver for wl128x */ +/* FW chip version for wl128x */ #define WL128X_CHIP_VER 7 -#define WL128X_IFTYPE_VER 3 -#define WL128X_MAJOR_VER 10 -#define WL128X_SUBTYPE_VER 2 -#define WL128X_MINOR_VER 115 +/* minimum single-role FW version for wl128x */ +#define WL128X_IFTYPE_SR_VER 3 +#define WL128X_MAJOR_SR_VER 10 +#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE +#define WL128X_MINOR_SR_VER 115 +/* minimum multi-role FW version for wl128x */ +#define WL128X_IFTYPE_MR_VER 5 +#define WL128X_MAJOR_MR_VER 7 +#define WL128X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE +#define WL128X_MINOR_MR_VER 42 #define WL12XX_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) @@ -55,6 +73,8 @@ struct wl12xx_priv { int ref_clock; int tcxo_clock; + + struct wl127x_rx_mem_pool_addr *rx_mem_addr; }; #endif /* __WL12XX_PRIV_H__ */ diff --git a/drivers/net/wireless/ti/wl18xx/Makefile b/drivers/net/wireless/ti/wl18xx/Makefile index 67c098734c7f..ae2b81735785 100644 --- a/drivers/net/wireless/ti/wl18xx/Makefile +++ b/drivers/net/wireless/ti/wl18xx/Makefile @@ -1,3 +1,3 @@ -wl18xx-objs = main.o acx.o tx.o io.o debugfs.o +wl18xx-objs = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o obj-$(CONFIG_WL18XX) += wl18xx.o diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c index 72840e23bf59..a169bb5a5dbf 100644 --- a/drivers/net/wireless/ti/wl18xx/acx.c +++ b/drivers/net/wireless/ti/wl18xx/acx.c @@ -75,7 +75,7 @@ int wl18xx_acx_set_checksum_state(struct wl1271 *wl) acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED; - ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx)); + ret = wl1271_cmd_configure(wl, ACX_CSUM_CONFIG, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("failed to set Tx checksum state: %d", ret); goto out; @@ -109,3 +109,88 @@ out: kfree(acx); return ret; } + +int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide) +{ + struct wlcore_peer_ht_operation_mode *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx peer ht operation mode hlid %d bw %d", + hlid, wide); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->hlid = hlid; + acx->bandwidth = wide ? WLCORE_BANDWIDTH_40MHZ : WLCORE_BANDWIDTH_20MHZ; + + ret = wl1271_cmd_configure(wl, ACX_PEER_HT_OPERATION_MODE_CFG, acx, + sizeof(*acx)); + + if (ret < 0) { + wl1271_warning("acx peer ht operation mode failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; + +} + +/* + * this command is basically the same as wl1271_acx_ht_capabilities, + * with the addition of supported rates. 
they should be unified in + * the next fw api change + */ +int wl18xx_acx_set_peer_cap(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid) +{ + struct wlcore_acx_peer_cap *acx; + int ret = 0; + u32 ht_capabilites = 0; + + wl1271_debug(DEBUG_ACX, + "acx set cap ht_supp: %d ht_cap: %d rates: 0x%x", + ht_cap->ht_supported, ht_cap->cap, rate_set); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + if (allow_ht_operation && ht_cap->ht_supported) { + /* no need to translate capabilities - use the spec values */ + ht_capabilites = ht_cap->cap; + + /* + * this bit is not employed by the spec but only by FW to + * indicate peer HT support + */ + ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION; + + /* get data from A-MPDU parameters field */ + acx->ampdu_max_length = ht_cap->ampdu_factor; + acx->ampdu_min_spacing = ht_cap->ampdu_density; + } + + acx->hlid = hlid; + acx->ht_capabilites = cpu_to_le32(ht_capabilites); + acx->supported_rates = cpu_to_le32(rate_set); + + ret = wl1271_cmd_configure(wl, ACX_PEER_CAP, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ht capabilities setting failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h index e2609a6b7341..0e636def1217 100644 --- a/drivers/net/wireless/ti/wl18xx/acx.h +++ b/drivers/net/wireless/ti/wl18xx/acx.h @@ -26,7 +26,13 @@ #include "../wlcore/acx.h" enum { - ACX_CLEAR_STATISTICS = 0x0047, + ACX_NS_IPV6_FILTER = 0x0050, + ACX_PEER_HT_OPERATION_MODE_CFG = 0x0051, + ACX_CSUM_CONFIG = 0x0052, + ACX_SIM_CONFIG = 0x0053, + ACX_CLEAR_STATISTICS = 0x0054, + ACX_AUTO_RX_STREAMING = 0x0055, + ACX_PEER_CAP = 0x0056 }; /* numbers of bits the length field takes (add 1 for the actual number) */ @@ -278,10 +284,57 @@ struct wl18xx_acx_clear_statistics { struct acx_header header; }; +enum wlcore_bandwidth { + WLCORE_BANDWIDTH_20MHZ, + WLCORE_BANDWIDTH_40MHZ, +}; + +struct wlcore_peer_ht_operation_mode { + struct acx_header header; + + u8 hlid; + u8 bandwidth; /* enum wlcore_bandwidth */ + u8 padding[2]; +}; + +/* + * ACX_PEER_CAP + * this struct is very similar to wl1271_acx_ht_capabilities, with the + * addition of supported rates + */ +struct wlcore_acx_peer_cap { + struct acx_header header; + + /* bitmask of capability bits supported by the peer */ + __le32 ht_capabilites; + + /* rates supported by the remote peer */ + __le32 supported_rates; + + /* Indicates to which link these capabilities apply. */ + u8 hlid; + + /* + * This the maximum A-MPDU length supported by the AP. 
The FW may not + * exceed this length when sending A-MPDUs + */ + u8 ampdu_max_length; + + /* This is the minimal spacing required when sending A-MPDUs to the AP*/ + u8 ampdu_min_spacing; + + u8 padding; +} __packed; + int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap, u32 sdio_blk_size, u32 extra_mem_blks, u32 len_field_size); int wl18xx_acx_set_checksum_state(struct wl1271 *wl); int wl18xx_acx_clear_statistics(struct wl1271 *wl); +int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide); +int wl18xx_acx_set_peer_cap(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid); #endif /* __WL18XX_ACX_H__ */ diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c new file mode 100644 index 000000000000..1d1f6cc7a50a --- /dev/null +++ b/drivers/net/wireless/ti/wl18xx/cmd.c @@ -0,0 +1,80 @@ +/* + * This file is part of wl18xx + * + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "../wlcore/cmd.h" +#include "../wlcore/debug.h" +#include "../wlcore/hw_ops.h" + +#include "cmd.h" + +int wl18xx_cmd_channel_switch(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_channel_switch *ch_switch) +{ + struct wl18xx_cmd_channel_switch *cmd; + u32 supported_rates; + int ret; + + wl1271_debug(DEBUG_ACX, "cmd channel switch"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->role_id = wlvif->role_id; + cmd->channel = ch_switch->channel->hw_value; + cmd->switch_time = ch_switch->count; + cmd->stop_tx = ch_switch->block_tx; + + switch (ch_switch->channel->band) { + case IEEE80211_BAND_2GHZ: + cmd->band = WLCORE_BAND_2_4GHZ; + break; + case IEEE80211_BAND_5GHZ: + cmd->band = WLCORE_BAND_5GHZ; + break; + default: + wl1271_error("invalid channel switch band: %d", + ch_switch->channel->band); + ret = -EINVAL; + goto out_free; + } + + supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES | + wlcore_hw_sta_get_ap_rate_mask(wl, wlvif); + if (wlvif->p2p) + supported_rates &= ~CONF_TX_CCK_RATES; + cmd->local_supported_rates = cpu_to_le32(supported_rates); + cmd->channel_type = wlvif->channel_type; + + ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send channel switch command"); + goto out_free; + } + +out_free: + kfree(cmd); +out: + return ret; +} diff --git a/drivers/net/wireless/ti/wl18xx/cmd.h b/drivers/net/wireless/ti/wl18xx/cmd.h new file mode 100644 index 000000000000..6687d10899ac --- /dev/null +++ b/drivers/net/wireless/ti/wl18xx/cmd.h @@ -0,0 +1,52 @@ +/* + * This file is part of wl18xx + * + * Copyright (C) 2011 Texas Instruments. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL18XX_CMD_H__ +#define __WL18XX_CMD_H__ + +#include "../wlcore/wlcore.h" +#include "../wlcore/acx.h" + +struct wl18xx_cmd_channel_switch { + struct wl1271_cmd_header header; + + u8 role_id; + + /* The new serving channel */ + u8 channel; + /* Relative time of the serving channel switch in TBTT units */ + u8 switch_time; + /* Stop the role TX, should expect it after radar detection */ + u8 stop_tx; + + __le32 local_supported_rates; + + u8 channel_type; + u8 band; + + u8 padding[2]; +} __packed; + +int wl18xx_cmd_channel_switch(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_channel_switch *ch_switch); + +#endif diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h index 4d426cc20274..e34302e3b51d 100644 --- a/drivers/net/wireless/ti/wl18xx/conf.h +++ b/drivers/net/wireless/ti/wl18xx/conf.h @@ -23,20 +23,21 @@ #define __WL18XX_CONF_H__ #define WL18XX_CONF_MAGIC 0x10e100ca -#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0003) +#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0006) #define WL18XX_CONF_MASK 0x0000ffff #define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \ sizeof(struct wl18xx_priv_conf)) #define NUM_OF_CHANNELS_11_ABG 150 #define NUM_OF_CHANNELS_11_P 7 -#define WL18XX_NUM_OF_SUB_BANDS 9 #define SRF_TABLE_LEN 16 #define PIN_MUXING_SIZE 2 +#define WL18XX_TRACE_LOSS_GAPS_TX 10 +#define WL18XX_TRACE_LOSS_GAPS_RX 18 struct wl18xx_mac_and_phy_params { u8 phy_standalone; - u8 rdl; + u8 spare0; u8 enable_clpc; u8 enable_tx_low_pwr_on_siso_rdl; u8 auto_detect; @@ -69,18 +70,27 @@ struct wl18xx_mac_and_phy_params { u8 pwr_limit_reference_11_abg; u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P]; u8 pwr_limit_reference_11p; - u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS]; - u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS]; + u8 spare1; + u8 per_chan_bo_mode_11_abg[13]; + u8 per_chan_bo_mode_11_p[4]; u8 primary_clock_setting_time; u8 clock_valid_on_wake_up; u8 secondary_clock_setting_time; u8 board_type; /* enable point saturation */ u8 psat; - /* low/medium/high Tx power in dBm */ + /* low/medium/high Tx power in dBm for STA-HP BG */ s8 low_power_val; s8 med_power_val; s8 high_power_val; + s8 per_sub_band_tx_trace_loss[WL18XX_TRACE_LOSS_GAPS_TX]; + s8 per_sub_band_rx_trace_loss[WL18XX_TRACE_LOSS_GAPS_RX]; + u8 tx_rf_margin; + /* low/medium/high Tx power in dBm for other role */ + s8 low_power_val_2nd; + s8 med_power_val_2nd; + s8 high_power_val_2nd; + u8 padding[1]; } __packed; diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c new file mode 100644 index 000000000000..c9199d7804c6 --- /dev/null +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -0,0 +1,111 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "event.h" +#include "scan.h" +#include "../wlcore/cmd.h" +#include "../wlcore/debug.h" + +int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, + bool *timeout) +{ + u32 local_event; + + switch (event) { + case WLCORE_EVENT_PEER_REMOVE_COMPLETE: + local_event = PEER_REMOVE_COMPLETE_EVENT_ID; + break; + + case WLCORE_EVENT_DFS_CONFIG_COMPLETE: + local_event = DFS_CHANNELS_CONFIG_COMPLETE_EVENT; + break; + + default: + /* event not implemented */ + return 0; + } + return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout); +} + +int wl18xx_process_mailbox_events(struct wl1271 *wl) +{ + struct wl18xx_event_mailbox *mbox = wl->mbox; + u32 vector; + + vector = le32_to_cpu(mbox->events_vector); + wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector); + + if (vector & SCAN_COMPLETE_EVENT_ID) { + wl1271_debug(DEBUG_EVENT, "scan results: %d", + mbox->number_of_scan_results); + + if (wl->scan_wlvif) + wl18xx_scan_completed(wl, wl->scan_wlvif); + } + + if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { + wl1271_debug(DEBUG_EVENT, + "PERIODIC_SCAN_REPORT_EVENT (results %d)", + mbox->number_of_sched_scan_results); + + wlcore_scan_sched_scan_results(wl); + } + + if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID) + wlcore_event_sched_scan_completed(wl, 1); + + if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) + wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric); + + if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) + wlcore_event_ba_rx_constraint(wl, + le16_to_cpu(mbox->rx_ba_role_id_bitmap), + le16_to_cpu(mbox->rx_ba_allowed_bitmap)); + + if (vector & BSS_LOSS_EVENT_ID) + wlcore_event_beacon_loss(wl, + le16_to_cpu(mbox->bss_loss_bitmap)); + + if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) + wlcore_event_channel_switch(wl, + le16_to_cpu(mbox->channel_switch_role_id_bitmap), + true); + + if (vector & DUMMY_PACKET_EVENT_ID) + wlcore_event_dummy_packet(wl); + + /* + * "TX retries exceeded" has a different meaning according to mode. + * In AP mode the offending station is disconnected. + */ + if (vector & MAX_TX_FAILURE_EVENT_ID) + wlcore_event_max_tx_failure(wl, + le32_to_cpu(mbox->tx_retry_exceeded_bitmap)); + + if (vector & INACTIVE_STA_EVENT_ID) + wlcore_event_inactive_sta(wl, + le32_to_cpu(mbox->inactive_sta_bitmap)); + + if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID) + wlcore_event_roc_complete(wl); + + return 0; +} diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h new file mode 100644 index 000000000000..398f3d2c0a6c --- /dev/null +++ b/drivers/net/wireless/ti/wl18xx/event.h @@ -0,0 +1,77 @@ +/* + * This file is part of wl18xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL18XX_EVENT_H__ +#define __WL18XX_EVENT_H__ + +#include "../wlcore/wlcore.h" + +enum { + SCAN_COMPLETE_EVENT_ID = BIT(8), + RADAR_DETECTED_EVENT_ID = BIT(9), + CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(10), + BSS_LOSS_EVENT_ID = BIT(11), + MAX_TX_FAILURE_EVENT_ID = BIT(12), + DUMMY_PACKET_EVENT_ID = BIT(13), + INACTIVE_STA_EVENT_ID = BIT(14), + PEER_REMOVE_COMPLETE_EVENT_ID = BIT(15), + PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(16), + BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(17), + REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18), + DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19), + PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20), +}; + +struct wl18xx_event_mailbox { + __le32 events_vector; + + u8 number_of_scan_results; + u8 number_of_sched_scan_results; + + __le16 channel_switch_role_id_bitmap; + + s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; + + /* bitmap of removed links */ + __le32 hlid_removed_bitmap; + + /* rx ba constraint */ + __le16 rx_ba_role_id_bitmap; /* 0xfff means any role. */ + __le16 rx_ba_allowed_bitmap; + + /* bitmap of roc completed (by role id) */ + __le16 roc_completed_bitmap; + + /* bitmap of stations (by role id) with bss loss */ + __le16 bss_loss_bitmap; + + /* bitmap of stations (by HLID) which exceeded max tx retries */ + __le32 tx_retry_exceeded_bitmap; + + /* bitmap of inactive stations (by HLID) */ + __le32 inactive_sta_bitmap; +} __packed; + +int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, + bool *timeout); +int wl18xx_process_mailbox_events(struct wl1271 *wl); + +#endif diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 8d8c1f8c63b7..da3ef1b10a9c 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -34,10 +34,13 @@ #include "reg.h" #include "conf.h" +#include "cmd.h" #include "acx.h" #include "tx.h" #include "wl18xx.h" #include "io.h" +#include "scan.h" +#include "event.h" #include "debugfs.h" #define WL18XX_RX_CHECKSUM_MASK 0x40 @@ -334,6 +337,8 @@ static struct wlcore_conf wl18xx_conf = { .tmpl_short_retry_limit = 10, .tmpl_long_retry_limit = 10, .tx_watchdog_timeout = 5000, + .slow_link_thold = 3, + .fast_link_thold = 30, }, .conn = { .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, @@ -391,8 +396,10 @@ static struct wlcore_conf wl18xx_conf = { .scan = { .min_dwell_time_active = 7500, .max_dwell_time_active = 30000, - .min_dwell_time_passive = 100000, - .max_dwell_time_passive = 100000, + .min_dwell_time_active_long = 25000, + .max_dwell_time_active_long = 50000, + .dwell_time_passive = 100000, + .dwell_time_dfs = 150000, .num_probe_reqs = 2, .split_scan_timeout = 50000, }, @@ -489,6 +496,10 @@ static struct wlcore_conf wl18xx_conf = { .increase_time = 1, .window_size = 16, }, + .recovery = { + .bug_on_recovery = 0, + .no_recovery = 0, + }, }; static struct wl18xx_priv_conf 
wl18xx_default_priv_conf = { @@ -501,7 +512,6 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = { .clock_valid_on_wake_up = 0x00, .secondary_clock_setting_time = 0x05, .board_type = BOARD_TYPE_HDK_18XX, - .rdl = 0x01, .auto_detect = 0x00, .dedicated_fem = FEM_NONE, .low_band_component = COMPONENT_3_WAY_SWITCH, @@ -517,14 +527,44 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = { .enable_clpc = 0x00, .enable_tx_low_pwr_on_siso_rdl = 0x00, .rx_profile = 0x00, - .pwr_limit_reference_11_abg = 0xc8, + .pwr_limit_reference_11_abg = 0x64, + .per_chan_pwr_limit_arr_11abg = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .pwr_limit_reference_11p = 0x64, + .per_chan_bo_mode_11_abg = { 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00 }, + .per_chan_bo_mode_11_p = { 0x00, 0x00, 0x00, 0x00 }, + .per_chan_pwr_limit_arr_11p = { 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff }, .psat = 0, - .low_power_val = 0x00, - .med_power_val = 0x0a, - .high_power_val = 0x1e, + .low_power_val = 0x08, + .med_power_val = 0x12, + .high_power_val = 0x18, + .low_power_val_2nd = 0x05, + .med_power_val_2nd = 0x0a, + .high_power_val_2nd = 0x14, .external_pa_dc2dc = 0, - .number_of_assembled_ant2_4 = 1, + .number_of_assembled_ant2_4 = 2, .number_of_assembled_ant5 = 1, + .tx_rf_margin = 1, }, }; @@ -595,7 +635,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = { }; /* TODO: maybe move to a new header file? 
*/ -#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin" +#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin" static int wl18xx_identify_chip(struct wl1271 *wl) { @@ -608,15 +648,18 @@ static int wl18xx_identify_chip(struct wl1271 *wl) wl->sr_fw_name = WL18XX_FW_NAME; /* wl18xx uses the same firmware for PLT */ wl->plt_fw_name = WL18XX_FW_NAME; - wl->quirks |= WLCORE_QUIRK_NO_ELP | - WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN | + wl->quirks |= WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN | WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN | WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN | - WLCORE_QUIRK_TX_PAD_LAST_FRAME; - - wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, WL18XX_IFTYPE_VER, - WL18XX_MAJOR_VER, WL18XX_SUBTYPE_VER, - WL18XX_MINOR_VER); + WLCORE_QUIRK_TX_PAD_LAST_FRAME | + WLCORE_QUIRK_REGDOMAIN_CONF | + WLCORE_QUIRK_DUAL_PROBE_TMPL; + + wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, + WL18XX_IFTYPE_VER, WL18XX_MAJOR_VER, + WL18XX_SUBTYPE_VER, WL18XX_MINOR_VER, + /* there's no separate multi-role FW */ + 0, 0, 0, 0); break; case CHIP_ID_185x_PG10: wl1271_warning("chip id 0x%x (185x PG10) is deprecated", @@ -630,6 +673,11 @@ static int wl18xx_identify_chip(struct wl1271 *wl) goto out; } + wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4; + wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5; + wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC; + wl->sched_scan_templ_id_5 = CMD_TEMPL_PROBE_REQ_5_PERIODIC; + wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ; out: return ret; } @@ -843,6 +891,20 @@ static int wl18xx_boot(struct wl1271 *wl) if (ret < 0) goto out; + wl->event_mask = BSS_LOSS_EVENT_ID | + SCAN_COMPLETE_EVENT_ID | + RSSI_SNR_TRIGGER_0_EVENT_ID | + PERIODIC_SCAN_COMPLETE_EVENT_ID | + PERIODIC_SCAN_REPORT_EVENT_ID | + DUMMY_PACKET_EVENT_ID | + PEER_REMOVE_COMPLETE_EVENT_ID | + BA_SESSION_RX_CONSTRAINT_EVENT_ID | + REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID | + INACTIVE_STA_EVENT_ID | + MAX_TX_FAILURE_EVENT_ID | + CHANNEL_SWITCH_COMPLETE_EVENT_ID | + DFS_CHANNELS_CONFIG_COMPLETE_EVENT; + ret = wlcore_boot_run_firmware(wl); if (ret < 0) goto out; @@ -964,7 +1026,7 @@ static int wl18xx_hw_init(struct wl1271 *wl) /* (re)init private structures. Relevant on recovery as well. 
*/ priv->last_fw_rls_idx = 0; - priv->extra_spare_vif_count = 0; + priv->extra_spare_key_count = 0; /* set the default amount of spare blocks in the bitmap */ ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE); @@ -1022,7 +1084,12 @@ static bool wl18xx_is_mimo_supported(struct wl1271 *wl) { struct wl18xx_priv *priv = wl->priv; - return priv->conf.phy.number_of_assembled_ant2_4 >= 2; + /* only support MIMO with multiple antennas, and when SISO + * is not forced through config + */ + return (priv->conf.phy.number_of_assembled_ant2_4 >= 2) && + (priv->conf.ht.mode != HT_MODE_WIDE) && + (priv->conf.ht.mode != HT_MODE_SISO20); } /* @@ -1223,8 +1290,8 @@ static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem) { struct wl18xx_priv *priv = wl->priv; - /* If we have VIFs requiring extra spare, indulge them */ - if (priv->extra_spare_vif_count) + /* If we have keys requiring extra spare, indulge them */ + if (priv->extra_spare_key_count) return WL18XX_TX_HW_EXTRA_BLOCK_SPARE; return WL18XX_TX_HW_BLOCK_SPARE; @@ -1236,42 +1303,48 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd, struct ieee80211_key_conf *key_conf) { struct wl18xx_priv *priv = wl->priv; - bool change_spare = false; + bool change_spare = false, special_enc; int ret; + wl1271_debug(DEBUG_CRYPT, "extra spare keys before: %d", + priv->extra_spare_key_count); + + special_enc = key_conf->cipher == WL1271_CIPHER_SUITE_GEM || + key_conf->cipher == WLAN_CIPHER_SUITE_TKIP; + + ret = wlcore_set_key(wl, cmd, vif, sta, key_conf); + if (ret < 0) + goto out; + /* - * when adding the first or removing the last GEM/TKIP interface, + * when adding the first or removing the last GEM/TKIP key, * we have to adjust the number of spare blocks. */ - change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM || - key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) && - ((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) || - (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY)); + if (special_enc) { + if (cmd == SET_KEY) { + /* first key */ + change_spare = (priv->extra_spare_key_count == 0); + priv->extra_spare_key_count++; + } else if (cmd == DISABLE_KEY) { + /* last key */ + change_spare = (priv->extra_spare_key_count == 1); + priv->extra_spare_key_count--; + } + } - /* no need to change spare - just regular set_key */ - if (!change_spare) - return wlcore_set_key(wl, cmd, vif, sta, key_conf); + wl1271_debug(DEBUG_CRYPT, "extra spare keys after: %d", + priv->extra_spare_key_count); - ret = wlcore_set_key(wl, cmd, vif, sta, key_conf); - if (ret < 0) + if (!change_spare) goto out; /* key is now set, change the spare blocks */ - if (cmd == SET_KEY) { + if (priv->extra_spare_key_count) ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_EXTRA_BLOCK_SPARE); - if (ret < 0) - goto out; - - priv->extra_spare_vif_count++; - } else { + else ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE); - if (ret < 0) - goto out; - - priv->extra_spare_vif_count--; - } out: return ret; @@ -1296,6 +1369,92 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl, return buf_offset; } +static void wl18xx_sta_rc_update(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, + u32 changed) +{ + bool wide = sta->bandwidth >= IEEE80211_STA_RX_BW_40; + + wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide); + + if (!(changed & IEEE80211_RC_BW_CHANGED)) + return; + + mutex_lock(&wl->mutex); + + /* sanity */ + if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS)) + goto out; + + /* ignore the change before 
association */ + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + goto out; + + /* + * If we started out as wide, we can change the operation mode. If we + * thought this was a 20mhz AP, we have to reconnect + */ + if (wlvif->sta.role_chan_type == NL80211_CHAN_HT40MINUS || + wlvif->sta.role_chan_type == NL80211_CHAN_HT40PLUS) + wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide); + else + ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif)); + +out: + mutex_unlock(&wl->mutex); +} + +static int wl18xx_set_peer_cap(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid) +{ + return wl18xx_acx_set_peer_cap(wl, ht_cap, allow_ht_operation, + rate_set, hlid); +} + +static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + u8 thold; + struct wl18xx_fw_status_priv *status_priv = + (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv; + u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap); + + /* suspended links are never high priority */ + if (test_bit(hlid, (unsigned long *)&suspend_bitmap)) + return false; + + /* the priority thresholds are taken from FW */ + if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) && + !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map)) + thold = status_priv->tx_fast_link_prio_threshold; + else + thold = status_priv->tx_slow_link_prio_threshold; + + return lnk->allocated_pkts < thold; +} + +static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + u8 thold; + struct wl18xx_fw_status_priv *status_priv = + (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv; + u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap); + + if (test_bit(hlid, (unsigned long *)&suspend_bitmap)) + thold = status_priv->tx_suspend_threshold; + else if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) && + !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map)) + thold = status_priv->tx_fast_stop_threshold; + else + thold = status_priv->tx_slow_stop_threshold; + + return lnk->allocated_pkts < thold; +} + static int wl18xx_setup(struct wl1271 *wl); static struct wlcore_ops wl18xx_ops = { @@ -1305,6 +1464,8 @@ static struct wlcore_ops wl18xx_ops = { .plt_init = wl18xx_plt_init, .trigger_cmd = wl18xx_trigger_cmd, .ack_event = wl18xx_ack_event, + .wait_for_event = wl18xx_wait_for_event, + .process_mailbox_events = wl18xx_process_mailbox_events, .calc_tx_blocks = wl18xx_calc_tx_blocks, .set_tx_desc_blocks = wl18xx_set_tx_desc_blocks, .set_tx_desc_data_len = wl18xx_set_tx_desc_data_len, @@ -1320,16 +1481,26 @@ static struct wlcore_ops wl18xx_ops = { .ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask, .get_mac = wl18xx_get_mac, .debugfs_init = wl18xx_debugfs_add_files, + .scan_start = wl18xx_scan_start, + .scan_stop = wl18xx_scan_stop, + .sched_scan_start = wl18xx_sched_scan_start, + .sched_scan_stop = wl18xx_scan_sched_scan_stop, .handle_static_data = wl18xx_handle_static_data, .get_spare_blocks = wl18xx_get_spare_blocks, .set_key = wl18xx_set_key, + .channel_switch = wl18xx_cmd_channel_switch, .pre_pkt_send = wl18xx_pre_pkt_send, + .sta_rc_update = wl18xx_sta_rc_update, + .set_peer_cap = wl18xx_set_peer_cap, + .lnk_high_prio = wl18xx_lnk_high_prio, + .lnk_low_prio = wl18xx_lnk_low_prio, }; /* HT cap appropriate for wide channels in 2Ghz */ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = { .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | - IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 
IEEE80211_HT_CAP_DSSSCCK40, + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 | + IEEE80211_HT_CAP_GRN_FLD, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, @@ -1343,7 +1514,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = { /* HT cap appropriate for wide channels in 5Ghz */ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = { .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | - IEEE80211_HT_CAP_SUP_WIDTH_20_40, + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, @@ -1356,7 +1528,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = { /* HT cap appropriate for SISO 20 */ static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = { - .cap = IEEE80211_HT_CAP_SGI_20, + .cap = IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_GRN_FLD, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, @@ -1369,7 +1542,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = { /* HT cap appropriate for MIMO rates in 20mhz channel */ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = { - .cap = IEEE80211_HT_CAP_SGI_20, + .cap = IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_GRN_FLD, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, @@ -1387,7 +1561,8 @@ static int wl18xx_setup(struct wl1271 *wl) wl->rtable = wl18xx_rtable; wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS; - wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS; + wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS; + wl->num_channels = 2; wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES; wl->band_rate_to_idx = wl18xx_band_rate_to_idx; wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX; @@ -1506,7 +1681,8 @@ static int wl18xx_probe(struct platform_device *pdev) int ret; hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv), - WL18XX_AGGR_BUFFER_SIZE); + WL18XX_AGGR_BUFFER_SIZE, + sizeof(struct wl18xx_event_mailbox)); if (IS_ERR(hw)) { wl1271_error("can't allocate hw"); ret = PTR_ERR(hw); diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c new file mode 100644 index 000000000000..09d944505ac0 --- /dev/null +++ b/drivers/net/wireless/ti/wl18xx/scan.c @@ -0,0 +1,326 @@ +/* + * This file is part of wl18xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/ieee80211.h> +#include "scan.h" +#include "../wlcore/debug.h" + +static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd, + struct wlcore_scan_channels *cmd_channels) +{ + memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive)); + memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active)); + cmd->dfs = cmd_channels->dfs; + cmd->passive_active = cmd_channels->passive_active; + + memcpy(cmd->channels_2, cmd_channels->channels_2, + sizeof(cmd->channels_2)); + memcpy(cmd->channels_5, cmd_channels->channels_5, + sizeof(cmd->channels_2)); + /* channels_4 are not supported, so no need to copy them */ +} + +static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_scan_request *req) +{ + struct wl18xx_cmd_scan_params *cmd; + struct wlcore_scan_channels *cmd_channels = NULL; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->role_id = wlvif->role_id; + + if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) { + ret = -EINVAL; + goto out; + } + + cmd->scan_type = SCAN_TYPE_SEARCH; + cmd->rssi_threshold = -127; + cmd->snr_threshold = 0; + + cmd->bss_type = SCAN_BSS_TYPE_ANY; + + cmd->ssid_from_list = 0; + cmd->filter = 0; + cmd->add_broadcast = 0; + + cmd->urgency = 0; + cmd->protect = 0; + + cmd->n_probe_reqs = wl->conf.scan.num_probe_reqs; + cmd->terminate_after = 0; + + /* configure channels */ + WARN_ON(req->n_ssids > 1); + + cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL); + if (!cmd_channels) { + ret = -ENOMEM; + goto out; + } + + wlcore_set_scan_chan_params(wl, cmd_channels, req->channels, + req->n_channels, req->n_ssids, + SCAN_TYPE_SEARCH); + wl18xx_adjust_channels(cmd, cmd_channels); + + /* + * all the cycles params (except total cycles) should + * remain 0 for normal scan + */ + cmd->total_cycles = 1; + + if (req->no_cck) + cmd->rate = WL18XX_SCAN_RATE_6; + + cmd->tag = WL1271_SCAN_DEFAULT_TAG; + + if (req->n_ssids) { + cmd->ssid_len = req->ssids[0].ssid_len; + memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len); + } + + /* TODO: per-band ies? */ + if (cmd->active[0]) { + u8 band = IEEE80211_BAND_2GHZ; + ret = wl12xx_cmd_build_probe_req(wl, wlvif, + cmd->role_id, band, + req->ssids ? req->ssids[0].ssid : NULL, + req->ssids ? req->ssids[0].ssid_len : 0, + req->ie, + req->ie_len, + false); + if (ret < 0) { + wl1271_error("2.4GHz PROBE request template failed"); + goto out; + } + } + + if (cmd->active[1] || cmd->dfs) { + u8 band = IEEE80211_BAND_5GHZ; + ret = wl12xx_cmd_build_probe_req(wl, wlvif, + cmd->role_id, band, + req->ssids ? req->ssids[0].ssid : NULL, + req->ssids ? 
req->ssids[0].ssid_len : 0, + req->ie, + req->ie_len, + false); + if (ret < 0) { + wl1271_error("5GHz PROBE request template failed"); + goto out; + } + } + + wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("SCAN failed"); + goto out; + } + +out: + kfree(cmd_channels); + kfree(cmd); + return ret; +} + +void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + wl->scan.failed = false; + cancel_delayed_work(&wl->scan_complete_work); + ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, + msecs_to_jiffies(0)); +} + +static +int wl18xx_scan_sched_scan_config(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies) +{ + struct wl18xx_cmd_scan_params *cmd; + struct wlcore_scan_channels *cmd_channels = NULL; + struct conf_sched_scan_settings *c = &wl->conf.sched_scan; + int ret; + int filter_type; + + wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); + + filter_type = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req); + if (filter_type < 0) + return filter_type; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->role_id = wlvif->role_id; + + if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) { + ret = -EINVAL; + goto out; + } + + cmd->scan_type = SCAN_TYPE_PERIODIC; + cmd->rssi_threshold = c->rssi_threshold; + cmd->snr_threshold = c->snr_threshold; + + /* don't filter on BSS type */ + cmd->bss_type = SCAN_BSS_TYPE_ANY; + + cmd->ssid_from_list = 1; + if (filter_type == SCAN_SSID_FILTER_LIST) + cmd->filter = 1; + cmd->add_broadcast = 0; + + cmd->urgency = 0; + cmd->protect = 0; + + cmd->n_probe_reqs = c->num_probe_reqs; + /* don't stop scanning automatically when something is found */ + cmd->terminate_after = 0; + + cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL); + if (!cmd_channels) { + ret = -ENOMEM; + goto out; + } + + /* configure channels */ + wlcore_set_scan_chan_params(wl, cmd_channels, req->channels, + req->n_channels, req->n_ssids, + SCAN_TYPE_PERIODIC); + wl18xx_adjust_channels(cmd, cmd_channels); + + cmd->short_cycles_sec = 0; + cmd->long_cycles_sec = cpu_to_le16(req->interval); + cmd->short_cycles_count = 0; + + cmd->total_cycles = 0; + + cmd->tag = WL1271_SCAN_DEFAULT_TAG; + + /* create a PERIODIC_SCAN_REPORT_EVENT whenever we've got a match */ + cmd->report_threshold = 1; + cmd->terminate_on_report = 0; + + if (cmd->active[0]) { + u8 band = IEEE80211_BAND_2GHZ; + ret = wl12xx_cmd_build_probe_req(wl, wlvif, + cmd->role_id, band, + req->ssids ? req->ssids[0].ssid : NULL, + req->ssids ? req->ssids[0].ssid_len : 0, + ies->ie[band], + ies->len[band], + true); + if (ret < 0) { + wl1271_error("2.4GHz PROBE request template failed"); + goto out; + } + } + + if (cmd->active[1] || cmd->dfs) { + u8 band = IEEE80211_BAND_5GHZ; + ret = wl12xx_cmd_build_probe_req(wl, wlvif, + cmd->role_id, band, + req->ssids ? req->ssids[0].ssid : NULL, + req->ssids ? 
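Both wl18xx_scan_send() and wl18xx_scan_sched_scan_config() above fill the same wl18xx_cmd_scan_params command; the sketch below only contrasts the fields the two paths set differently (a hypothetical helper, not driver code; the enum labels are local and do not claim the firmware constant values):

    #include <stdint.h>
    #include <string.h>

    /* local labels only; the real values are SCAN_TYPE_SEARCH / SCAN_TYPE_PERIODIC */
    enum scan_kind { SCAN_ONE_SHOT, SCAN_PERIODIC };

    /* subset of wl18xx_cmd_scan_params fields the two paths set differently */
    struct scan_cycle_cfg {
            int8_t   rssi_threshold;
            uint8_t  ssid_from_list;
            uint8_t  total_cycles;     /* 0 means "cycle until stopped" */
            uint16_t long_cycles_sec;
            uint8_t  report_threshold;
    };

    static void fill_scan_cycle_cfg(struct scan_cycle_cfg *c, enum scan_kind kind,
                                    int8_t sched_rssi_thold, uint16_t interval_sec)
    {
            memset(c, 0, sizeof(*c));
            if (kind == SCAN_ONE_SHOT) {
                    c->rssi_threshold = -127;  /* effectively no RSSI filtering */
                    c->total_cycles = 1;       /* exactly one pass over the channels */
            } else {
                    c->rssi_threshold = sched_rssi_thold; /* from conf.sched_scan */
                    c->ssid_from_list = 1;     /* match the configured SSID list */
                    c->total_cycles = 0;       /* keep cycling until stopped */
                    c->long_cycles_sec = interval_sec;
                    c->report_threshold = 1;   /* report event after each match */
            }
    }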
req->ssids[0].ssid_len : 0, + ies->ie[band], + ies->len[band], + true); + if (ret < 0) { + wl1271_error("5GHz PROBE request template failed"); + goto out; + } + } + + wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("SCAN failed"); + goto out; + } + +out: + kfree(cmd_channels); + kfree(cmd); + return ret; +} + +int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies) +{ + return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies); +} + +static int __wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 scan_type) +{ + struct wl18xx_cmd_scan_stop *stop; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd periodic scan stop"); + + stop = kzalloc(sizeof(*stop), GFP_KERNEL); + if (!stop) { + wl1271_error("failed to alloc memory to send sched scan stop"); + return -ENOMEM; + } + + stop->role_id = wlvif->role_id; + stop->scan_type = scan_type; + + ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, stop, sizeof(*stop), 0); + if (ret < 0) { + wl1271_error("failed to send sched scan stop command"); + goto out_free; + } + +out_free: + kfree(stop); + return ret; +} + +void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_PERIODIC); +} +int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_scan_request *req) +{ + return wl18xx_scan_send(wl, wlvif, req); +} + +int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + return __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_SEARCH); +} diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h new file mode 100644 index 000000000000..eadee42689d1 --- /dev/null +++ b/drivers/net/wireless/ti/wl18xx/scan.h @@ -0,0 +1,127 @@ +/* + * This file is part of wl18xx + * + * Copyright (C) 2012 Texas Instruments. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL18XX_SCAN_H__ +#define __WL18XX_SCAN_H__ + +#include "../wlcore/wlcore.h" +#include "../wlcore/cmd.h" +#include "../wlcore/scan.h" + +struct tracking_ch_params { + struct conn_scan_ch_params channel; + + __le32 bssid_lsb; + __le16 bssid_msb; + + u8 padding[2]; +} __packed; + +/* probe request rate */ +enum +{ + WL18XX_SCAN_RATE_1 = 0, + WL18XX_SCAN_RATE_5_5 = 1, + WL18XX_SCAN_RATE_6 = 2, +}; + +#define WL18XX_MAX_CHANNELS_5GHZ 32 + +struct wl18xx_cmd_scan_params { + struct wl1271_cmd_header header; + + u8 role_id; + u8 scan_type; + + s8 rssi_threshold; /* for filtering (in dBm) */ + s8 snr_threshold; /* for filtering (in dB) */ + + u8 bss_type; /* for filtering */ + u8 ssid_from_list; /* use ssid from configured ssid list */ + u8 filter; /* forward only results with matching ssids */ + + /* + * add broadcast ssid in addition to the configured ssids. + * the driver should add dummy entry for it (?). + */ + u8 add_broadcast; + + u8 urgency; + u8 protect; /* ??? */ + u8 n_probe_reqs; /* Number of probes requests per channel */ + u8 terminate_after; /* early terminate scan operation */ + + u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */ + u8 active[SCAN_MAX_BANDS]; /* number of active scan channels */ + u8 dfs; /* number of dfs channels in 5ghz */ + u8 passive_active; /* number of passive before active channels 2.4ghz */ + + __le16 short_cycles_sec; + __le16 long_cycles_sec; + u8 short_cycles_count; + u8 total_cycles; /* 0 - infinite */ + u8 padding[2]; + + union { + struct { + struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; + struct conn_scan_ch_params channels_5[WL18XX_MAX_CHANNELS_5GHZ]; + struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ]; + }; + struct tracking_ch_params channels_tracking[WL1271_SCAN_MAX_CHANNELS]; + } ; + + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */ + u8 tag; + u8 rate; + + /* send SCAN_REPORT_EVENT in periodic scans after each cycle + * if number of results >= report_threshold. Must be 0 for + * non periodic scans + */ + u8 report_threshold; + + /* Should periodic scan stop after a report event was created. + * Must be 0 for non periodic scans. 
+ */ + u8 terminate_on_report; + + u8 padding1[3]; +} __packed; + +struct wl18xx_cmd_scan_stop { + struct wl1271_cmd_header header; + + u8 role_id; + u8 scan_type; + u8 padding[2]; +} __packed; + +int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_scan_request *req); +int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif); +void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies); +void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif); +#endif diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c index 5b1fb10d9fd7..57c694396647 100644 --- a/drivers/net/wireless/ti/wl18xx/tx.c +++ b/drivers/net/wireless/ti/wl18xx/tx.c @@ -28,6 +28,49 @@ #include "wl18xx.h" #include "tx.h" +static +void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif, + struct ieee80211_tx_rate *rate) +{ + u8 fw_rate = wl->fw_status_2->counters.tx_last_rate; + + if (fw_rate > CONF_HW_RATE_INDEX_MAX) { + wl1271_error("last Tx rate invalid: %d", fw_rate); + rate->idx = 0; + rate->flags = 0; + return; + } + + if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) { + rate->idx = fw_rate; + rate->flags = 0; + } else { + rate->flags = IEEE80211_TX_RC_MCS; + rate->idx = fw_rate - CONF_HW_RATE_INDEX_MCS0; + + /* SGI modifier is counted as a separate rate */ + if (fw_rate >= CONF_HW_RATE_INDEX_MCS7_SGI) + (rate->idx)--; + if (fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI) + (rate->idx)--; + + /* this also covers the 40Mhz SGI case (= MCS15) */ + if (fw_rate == CONF_HW_RATE_INDEX_MCS7_SGI || + fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI) + rate->flags |= IEEE80211_TX_RC_SHORT_GI; + + if (fw_rate > CONF_HW_RATE_INDEX_MCS7_SGI && vif) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + if (wlvif->channel_type == NL80211_CHAN_HT40MINUS || + wlvif->channel_type == NL80211_CHAN_HT40PLUS) { + /* adjustment needed for range 0-7 */ + rate->idx -= 8; + rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + } + } + } +} + static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte) { struct ieee80211_tx_info *info; @@ -44,7 +87,6 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte) /* a zero bit indicates Tx success */ tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX)); - skb = wl->tx_frames[id]; info = IEEE80211_SKB_CB(skb); @@ -56,11 +98,13 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte) /* update the TX status info */ if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; + /* + * first pass info->control.vif while it's valid, and then fill out + * the info->status structures + */ + wl18xx_get_last_tx_rate(wl, info->control.vif, &info->status.rates[0]); - /* no real data about Tx completion */ - info->status.rates[0].idx = -1; - info->status.rates[0].count = 0; - info->status.rates[0].flags = 0; + info->status.rates[0].count = 1; /* no data about retries */ info->status.ack_signal = -1; if (!tx_success) diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h index 96a1e438d677..b6739e79efcf 100644 --- a/drivers/net/wireless/ti/wl18xx/wl18xx.h +++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h @@ -26,10 +26,10 @@ /* minimum FW required for driver */ #define WL18XX_CHIP_VER 8 -#define WL18XX_IFTYPE_VER 2 -#define 
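wl18xx_get_last_tx_rate() above maps the firmware's flat rate index (see the CONF_HW_RATE_INDEX_* renumbering in the conf.h hunk later in this patch) onto a mac80211 MCS index plus SGI/40MHz flags. A standalone sketch of that arithmetic, with a local copy of the relevant index values:

    #include <stdbool.h>

    /* local copies of the relevant indices from the conf.h hunk below */
    enum {
            RATE_IDX_54MBPS    = 11,
            RATE_IDX_MCS0      = 12,
            RATE_IDX_MCS7_SGI  = 20,
            RATE_IDX_MCS15_SGI = 29,
            RATE_IDX_MAX       = 29,
    };

    struct last_rate {
            int  mcs_idx;      /* -1 for legacy (non-MCS) rates */
            bool sgi;
            bool forty_mhz;
    };

    /* ht40: the vif is currently on an HT40- or HT40+ channel */
    static int decode_last_tx_rate(unsigned fw_rate, bool ht40,
                                   struct last_rate *out)
    {
            out->mcs_idx = -1;
            out->sgi = false;
            out->forty_mhz = false;

            if (fw_rate > RATE_IDX_MAX)
                    return -1;              /* invalid index reported by FW */
            if (fw_rate <= RATE_IDX_54MBPS)
                    return 0;               /* legacy rate, no MCS flags */

            out->mcs_idx = fw_rate - RATE_IDX_MCS0;

            /* SGI variants occupy their own index, so fold them back */
            if (fw_rate >= RATE_IDX_MCS7_SGI)
                    out->mcs_idx--;
            if (fw_rate == RATE_IDX_MCS15_SGI)
                    out->mcs_idx--;
            if (fw_rate == RATE_IDX_MCS7_SGI || fw_rate == RATE_IDX_MCS15_SGI)
                    out->sgi = true;

            /* indices past MCS7_SGI double as MCS8-15 / 40MHz rates; on an
             * HT40 channel report them as MCS0-7 plus 40MHz width instead */
            if (fw_rate > RATE_IDX_MCS7_SGI && ht40) {
                    out->mcs_idx -= 8;
                    out->forty_mhz = true;
            }
            return 0;
    }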
WL18XX_MAJOR_VER 0 -#define WL18XX_SUBTYPE_VER 0 -#define WL18XX_MINOR_VER 100 +#define WL18XX_IFTYPE_VER 5 +#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE +#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE +#define WL18XX_MINOR_VER 28 #define WL18XX_CMD_MAX_SIZE 740 @@ -49,8 +49,8 @@ struct wl18xx_priv { /* Index of last released Tx desc in FW */ u8 last_fw_rls_idx; - /* number of VIFs requiring extra spare mem-blocks */ - int extra_spare_vif_count; + /* number of keys requiring extra spare mem-blocks */ + int extra_spare_key_count; }; #define WL18XX_FW_MAX_TX_STATUS_DESC 33 @@ -68,7 +68,43 @@ struct wl18xx_fw_status_priv { */ u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC]; - u8 padding[2]; + /* A bitmap representing the currently suspended links. The suspend + * is short lived, for multi-channel Tx requirements. + */ + __le32 link_suspend_bitmap; + + /* packet threshold for an "almost empty" AC, + * for Tx schedulng purposes + */ + u8 tx_ac_threshold; + + /* number of packets to queue up for a link in PS */ + u8 tx_ps_threshold; + + /* number of packet to queue up for a suspended link */ + u8 tx_suspend_threshold; + + /* Should have less than this number of packets in queue of a slow + * link to qualify as high priority link + */ + u8 tx_slow_link_prio_threshold; + + /* Should have less than this number of packets in queue of a fast + * link to qualify as high priority link + */ + u8 tx_fast_link_prio_threshold; + + /* Should have less than this number of packets in queue of a slow + * link before we stop queuing up packets for it. + */ + u8 tx_slow_stop_threshold; + + /* Should have less than this number of packets in queue of a fast + * link before we stop queuing up packets for it. + */ + u8 tx_fast_stop_threshold; + + u8 padding[3]; }; #define WL18XX_PHY_VERSION_MAX_LEN 20 diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig index d7b907e67170..2b832825c3d4 100644 --- a/drivers/net/wireless/ti/wlcore/Kconfig +++ b/drivers/net/wireless/ti/wlcore/Kconfig @@ -33,8 +33,3 @@ config WLCORE_SDIO If you choose to build a module, it'll be called wlcore_sdio. Say N if unsure. 
- -config WL12XX_PLATFORM_DATA - bool - depends on WLCORE_SDIO != n || WL1251_SDIO != n - default y diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile index d9fba9e32130..b21398f6c3ec 100644 --- a/drivers/net/wireless/ti/wlcore/Makefile +++ b/drivers/net/wireless/ti/wlcore/Makefile @@ -9,7 +9,4 @@ obj-$(CONFIG_WLCORE) += wlcore.o obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o -# small builtin driver bit -obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o - ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c index ce108a736bd0..c79654323396 100644 --- a/drivers/net/wireless/ti/wlcore/acx.c +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -1340,6 +1340,8 @@ out: kfree(acx); return ret; } +EXPORT_SYMBOL_GPL(wl1271_acx_set_ht_capabilities); + int wl1271_acx_set_ht_information(struct wl1271 *wl, struct wl12xx_vif *wlvif, @@ -1433,13 +1435,22 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, acx->win_size = wl->conf.ht.rx_ba_win_size; acx->ssn = ssn; - ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx, - sizeof(*acx)); + ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx, + sizeof(*acx), + BIT(CMD_STATUS_NO_RX_BA_SESSION)); if (ret < 0) { wl1271_warning("acx ba receiver session failed: %d", ret); goto out; } + /* sometimes we can't start the session */ + if (ret == CMD_STATUS_NO_RX_BA_SESSION) { + wl1271_warning("no fw rx ba on tid %d", tid_index); + ret = -EBUSY; + goto out; + } + + ret = 0; out: kfree(acx); return ret; diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h index d03215d6b3bd..126536c6a393 100644 --- a/drivers/net/wireless/ti/wlcore/acx.h +++ b/drivers/net/wireless/ti/wlcore/acx.h @@ -1025,7 +1025,6 @@ enum { ACX_CONFIG_HANGOVER = 0x0042, ACX_FEATURE_CFG = 0x0043, ACX_PROTECTION_CFG = 0x0044, - ACX_CHECKSUM_CONFIG = 0x0045, }; diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c index 375ea574eafb..77752b03f189 100644 --- a/drivers/net/wireless/ti/wlcore/boot.c +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -84,47 +84,57 @@ out: static int wlcore_validate_fw_ver(struct wl1271 *wl) { unsigned int *fw_ver = wl->chip.fw_ver; - unsigned int *min_ver = wl->min_fw_ver; + unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ? 
+ wl->min_mr_fw_ver : wl->min_sr_fw_ver; + char min_fw_str[32] = ""; + int i; /* the chip must be exactly equal */ - if (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP]) + if ((min_ver[FW_VER_CHIP] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])) goto fail; - /* always check the next digit if all previous ones are equal */ - - if (min_ver[FW_VER_IF_TYPE] < fw_ver[FW_VER_IF_TYPE]) - goto out; - else if (min_ver[FW_VER_IF_TYPE] > fw_ver[FW_VER_IF_TYPE]) + /* the firmware type must be equal */ + if ((min_ver[FW_VER_IF_TYPE] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_IF_TYPE] != fw_ver[FW_VER_IF_TYPE])) goto fail; - if (min_ver[FW_VER_MAJOR] < fw_ver[FW_VER_MAJOR]) - goto out; - else if (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR]) + /* the project number must be equal */ + if ((min_ver[FW_VER_SUBTYPE] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_SUBTYPE] != fw_ver[FW_VER_SUBTYPE])) goto fail; - if (min_ver[FW_VER_SUBTYPE] < fw_ver[FW_VER_SUBTYPE]) - goto out; - else if (min_ver[FW_VER_SUBTYPE] > fw_ver[FW_VER_SUBTYPE]) + /* the API version must be greater or equal */ + if ((min_ver[FW_VER_MAJOR] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])) goto fail; - if (min_ver[FW_VER_MINOR] < fw_ver[FW_VER_MINOR]) - goto out; - else if (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR]) + /* if the API version is equal... */ + if (((min_ver[FW_VER_MAJOR] == WLCORE_FW_VER_IGNORE) || + (min_ver[FW_VER_MAJOR] == fw_ver[FW_VER_MAJOR])) && + /* ...the minor must be greater or equal */ + ((min_ver[FW_VER_MINOR] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR]))) goto fail; -out: return 0; fail: - wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is outdated.\n" - "Please use at least FW %u.%u.%u.%u.%u.\n" - "You can get more information at:\n" - "http://wireless.kernel.org/en/users/Drivers/wl12xx", + for (i = 0; i < NUM_FW_VER; i++) + if (min_ver[i] == WLCORE_FW_VER_IGNORE) + snprintf(min_fw_str, sizeof(min_fw_str), + "%s*.", min_fw_str); + else + snprintf(min_fw_str, sizeof(min_fw_str), + "%s%u.", min_fw_str, min_ver[i]); + + wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n" + "Please use at least FW %s\n" + "You can get the latest firmwares at:\n" + "git://github.com/TI-OpenLink/firmwares.git", fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE], fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE], - fw_ver[FW_VER_MINOR], min_ver[FW_VER_CHIP], - min_ver[FW_VER_IF_TYPE], min_ver[FW_VER_MAJOR], - min_ver[FW_VER_SUBTYPE], min_ver[FW_VER_MINOR]); + fw_ver[FW_VER_MINOR], min_fw_str); return -EINVAL; } @@ -491,7 +501,7 @@ int wlcore_boot_run_firmware(struct wl1271 *wl) if (ret < 0) return ret; - wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); + wl->mbox_ptr[1] = wl->mbox_ptr[0] + wl->mbox_size; wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x", wl->mbox_ptr[0], wl->mbox_ptr[1]); @@ -508,23 +518,6 @@ int wlcore_boot_run_firmware(struct wl1271 *wl) */ /* unmask required mbox events */ - wl->event_mask = BSS_LOSE_EVENT_ID | - REGAINED_BSS_EVENT_ID | - SCAN_COMPLETE_EVENT_ID | - ROLE_STOP_COMPLETE_EVENT_ID | - RSSI_SNR_TRIGGER_0_EVENT_ID | - PSPOLL_DELIVERY_FAILURE_EVENT_ID | - SOFT_GEMINI_SENSE_EVENT_ID | - PERIODIC_SCAN_REPORT_EVENT_ID | - PERIODIC_SCAN_COMPLETE_EVENT_ID | - DUMMY_PACKET_EVENT_ID | - PEER_REMOVE_COMPLETE_EVENT_ID | - BA_SESSION_RX_CONSTRAINT_EVENT_ID | - REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID | - INACTIVE_STA_EVENT_ID | - MAX_TX_RETRY_EVENT_ID | - CHANNEL_SWITCH_COMPLETE_EVENT_ID; - ret = 
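wlcore_validate_fw_ver() above now selects a per-firmware-type minimum (multi-role vs single-role) and allows any component to be WLCORE_FW_VER_IGNORE: chip, interface type and project number must match exactly, while the major (API) and minor versions are "at least" checks. A compact standalone restatement (the IGNORE value here is a stand-in, the real constant is defined in wlcore):

    #include <stdbool.h>

    #define FW_VER_IGNORE  (~0u)   /* stand-in for WLCORE_FW_VER_IGNORE */

    enum { VER_CHIP, VER_IF_TYPE, VER_MAJOR, VER_SUBTYPE, VER_MINOR, NUM_VER };

    static bool fw_ver_ok(const unsigned fw[NUM_VER], const unsigned min[NUM_VER])
    {
            /* chip, firmware type and project number must match exactly */
            if (min[VER_CHIP] != FW_VER_IGNORE && min[VER_CHIP] != fw[VER_CHIP])
                    return false;
            if (min[VER_IF_TYPE] != FW_VER_IGNORE &&
                min[VER_IF_TYPE] != fw[VER_IF_TYPE])
                    return false;
            if (min[VER_SUBTYPE] != FW_VER_IGNORE &&
                min[VER_SUBTYPE] != fw[VER_SUBTYPE])
                    return false;

            /* the API (major) version must be at least the minimum */
            if (min[VER_MAJOR] != FW_VER_IGNORE && min[VER_MAJOR] > fw[VER_MAJOR])
                    return false;

            /* when the major versions are equal (or ignored), the minor
             * version must also be at least the minimum */
            if ((min[VER_MAJOR] == FW_VER_IGNORE ||
                 min[VER_MAJOR] == fw[VER_MAJOR]) &&
                min[VER_MINOR] != FW_VER_IGNORE && min[VER_MINOR] > fw[VER_MINOR])
                    return false;

            return true;
    }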
wl1271_event_unmask(wl); if (ret < 0) { wl1271_error("EVENT mask setting failed"); diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 27f83f72a93b..6331f9e1cb39 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -48,14 +48,15 @@ * @id: command id * @buf: buffer containing the command, must work with dma * @len: length of the buffer + * return the cmd status code on success. */ -int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, - size_t res_len) +static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf, + size_t len, size_t res_len) { struct wl1271_cmd_header *cmd; unsigned long timeout; u32 intr; - int ret = 0; + int ret; u16 status; u16 poll_count = 0; @@ -71,7 +72,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false); if (ret < 0) - goto fail; + return ret; /* * TODO: we just need this because one bit is in a different @@ -79,19 +80,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, */ ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len); if (ret < 0) - goto fail; + return ret; timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr); if (ret < 0) - goto fail; + return ret; while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { if (time_after(jiffies, timeout)) { wl1271_error("command complete timeout"); - ret = -ETIMEDOUT; - goto fail; + return -ETIMEDOUT; } poll_count++; @@ -102,7 +102,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr); if (ret < 0) - goto fail; + return ret; } /* read back the status code of the command */ @@ -111,33 +111,66 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false); if (ret < 0) - goto fail; + return ret; status = le16_to_cpu(cmd->status); - if (status != CMD_STATUS_SUCCESS) { - wl1271_error("command execute failure %d", status); - ret = -EIO; - goto fail; - } ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE); if (ret < 0) + return ret; + + return status; +} + +/* + * send command to fw and return cmd status on success + * valid_rets contains a bitmap of allowed error codes + */ +int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len, + size_t res_len, unsigned long valid_rets) +{ + int ret = __wlcore_cmd_send(wl, id, buf, len, res_len); + + if (ret < 0) goto fail; - return 0; + /* success is always a valid status */ + valid_rets |= BIT(CMD_STATUS_SUCCESS); + if (ret >= MAX_COMMAND_STATUS || + !test_bit(ret, &valid_rets)) { + wl1271_error("command execute failure %d", ret); + ret = -EIO; + goto fail; + } + return ret; fail: wl12xx_queue_recovery_work(wl); return ret; } +EXPORT_SYMBOL_GPL(wl1271_cmd_send); + +/* + * wrapper for wlcore_cmd_send that accept only CMD_STATUS_SUCCESS + * return 0 on success. 
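The new wlcore_cmd_send_failsafe() above returns the firmware status code and validates it against a caller-supplied bitmap of acceptable codes, with success always accepted; anything else triggers recovery. A minimal sketch of that validation, with the recovery path reduced to an error return (MAX_COMMAND_STATUS value shown is only illustrative):

    #define BIT(n)              (1UL << (n))
    #define CMD_STATUS_SUCCESS  0
    #define MAX_COMMAND_STATUS  25   /* illustrative: one past the last code */

    /* returns the status code if allowed, or -1 (the driver would then
     * queue recovery work, as wlcore_cmd_send_failsafe() does) */
    static int check_cmd_status(int status, unsigned long valid_rets)
    {
            valid_rets |= BIT(CMD_STATUS_SUCCESS);  /* success is always valid */

            if (status < 0)
                    return -1;                      /* transport error */
            if (status >= MAX_COMMAND_STATUS || !(valid_rets & BIT(status)))
                    return -1;                      /* unexpected FW status */

            return status;
    }

    /* usage pattern, mirroring wl12xx_acx_set_ba_receiver_session() earlier
     * in this patch:
     *   ret = check_cmd_status(status, BIT(CMD_STATUS_NO_RX_BA_SESSION));
     *   if (ret == CMD_STATUS_NO_RX_BA_SESSION)
     *           ... treat as -EBUSY instead of recovering ...
     */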
+ */ +int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, + size_t res_len) +{ + int ret = wlcore_cmd_send_failsafe(wl, id, buf, len, res_len, 0); + + if (ret < 0) + return ret; + return 0; +} /* * Poll the mailbox event field until any of the bits in the mask is set or a * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) */ -static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, - u32 mask, bool *timeout) +int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, + u32 mask, bool *timeout) { u32 *events_vector; u32 event; @@ -187,20 +220,7 @@ out: kfree(events_vector); return ret; } - -static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) -{ - int ret; - bool timeout = false; - - ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout); - if (ret != 0 || timeout) { - wl12xx_queue_recovery_work(wl); - return ret; - } - - return 0; -} +EXPORT_SYMBOL_GPL(wlcore_cmd_wait_for_event_or_timeout); int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, u8 *role_id) @@ -278,6 +298,16 @@ out: return ret; } +static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid) +{ + if (wl->session_ids[hlid] >= SESSION_COUNTER_MAX) + wl->session_ids[hlid] = 0; + + wl->session_ids[hlid]++; + + return wl->session_ids[hlid]; +} + int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) { unsigned long flags; @@ -285,12 +315,21 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) if (link >= WL12XX_MAX_LINKS) return -EBUSY; + wl->session_ids[link] = wlcore_get_new_session_id(wl, link); + /* these bits are used by op_tx */ spin_lock_irqsave(&wl->wl_lock, flags); __set_bit(link, wl->links_map); __set_bit(link, wlvif->links_map); spin_unlock_irqrestore(&wl->wl_lock, flags); + + /* take the last "freed packets" value from the current FW status */ + wl->links[link].prev_freed_pkts = + wl->fw_status_2->counters.tx_lnk_free_pkts[link]; + wl->links[link].wlvif = wlvif; *hlid = link; + + wl->active_link_count++; return 0; } @@ -307,24 +346,21 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) __clear_bit(*hlid, wlvif->links_map); spin_unlock_irqrestore(&wl->wl_lock, flags); + wl->links[*hlid].allocated_pkts = 0; + wl->links[*hlid].prev_freed_pkts = 0; + wl->links[*hlid].ba_bitmap = 0; + memset(wl->links[*hlid].addr, 0, ETH_ALEN); + /* * At this point op_tx() will not add more packets to the queues. We * can purge them. 
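Session counters now live per link (hlid) rather than per vif: wlcore_get_new_session_id() above wraps the counter at SESSION_COUNTER_MAX, and the allocate/free link paths around this hunk keep the bookkeeping (prev_freed_pkts, active_link_count) in sync. A schematic stand-alone version (constants are illustrative, not the driver's values):

    #define MAX_LINKS            16  /* WL12XX_MAX_LINKS in the driver */
    #define SESSION_COUNTER_MAX  7   /* illustrative; the real limit is not shown here */

    struct link_state {
            unsigned session_id;
            unsigned allocated_pkts;
            unsigned prev_freed_pkts;
    };

    static struct link_state links[MAX_LINKS];
    static int active_link_count;

    /* wrap the per-link session counter, as wlcore_get_new_session_id() does */
    static unsigned new_session_id(unsigned hlid)
    {
            if (links[hlid].session_id >= SESSION_COUNTER_MAX)
                    links[hlid].session_id = 0;
            return ++links[hlid].session_id;
    }

    static void allocate_link(unsigned hlid, unsigned fw_freed_pkts_snapshot)
    {
            new_session_id(hlid);
            /* start "freed packets" accounting from the FW's current counter */
            links[hlid].prev_freed_pkts = fw_freed_pkts_snapshot;
            active_link_count++;
    }

    static void free_link(unsigned hlid)
    {
            links[hlid].allocated_pkts = 0;
            links[hlid].prev_freed_pkts = 0;
            active_link_count--;   /* the driver also warns if this goes negative */
    }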
*/ wl1271_tx_reset_link_queues(wl, *hlid); + wl->links[*hlid].wlvif = NULL; *hlid = WL12XX_INVALID_LINK_ID; -} - -static int wl12xx_get_new_session_id(struct wl1271 *wl, - struct wl12xx_vif *wlvif) -{ - if (wlvif->session_counter >= SESSION_COUNTER_MAX) - wlvif->session_counter = 0; - - wlvif->session_counter++; - - return wlvif->session_counter; + wl->active_link_count--; + WARN_ON_ONCE(wl->active_link_count < 0); } static u8 wlcore_get_native_channel_type(u8 nl_channel_type) @@ -345,7 +381,9 @@ static u8 wlcore_get_native_channel_type(u8 nl_channel_type) } static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, - struct wl12xx_vif *wlvif) + struct wl12xx_vif *wlvif, + enum ieee80211_band band, + int channel) { struct wl12xx_cmd_role_start *cmd; int ret; @@ -359,9 +397,9 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id); cmd->role_id = wlvif->dev_role_id; - if (wlvif->band == IEEE80211_BAND_5GHZ) + if (band == IEEE80211_BAND_5GHZ) cmd->band = WLCORE_BAND_5GHZ; - cmd->channel = wlvif->channel; + cmd->channel = channel; if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) { ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid); @@ -369,7 +407,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, goto out_free; } cmd->device.hlid = wlvif->dev_hlid; - cmd->device.session = wl12xx_get_new_session_id(wl, wlvif); + cmd->device.session = wl->session_ids[wlvif->dev_hlid]; wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d", cmd->role_id, cmd->device.hlid, cmd->device.session); @@ -420,12 +458,6 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl, goto out_free; } - ret = wl1271_cmd_wait_for_event(wl, ROLE_STOP_COMPLETE_EVENT_ID); - if (ret < 0) { - wl1271_error("cmd role stop dev event completion error"); - goto out_free; - } - wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid); out_free: @@ -439,6 +471,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl12xx_cmd_role_start *cmd; + u32 supported_rates; int ret; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -459,7 +492,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) cmd->sta.ssid_len = wlvif->ssid_len; memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len); memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN); - cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); + + supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES | + wlcore_hw_sta_get_ap_rate_mask(wl, wlvif); + if (wlvif->p2p) + supported_rates &= ~CONF_TX_CCK_RATES; + + cmd->sta.local_rates = cpu_to_le32(supported_rates); + cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type); if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { @@ -468,8 +508,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out_free; } cmd->sta.hlid = wlvif->sta.hlid; - cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif); - cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set); + cmd->sta.session = wl->session_ids[wlvif->sta.hlid]; + /* + * We don't have the correct remote rates in this stage. The + * rates will be reconfigured later, after association, if the + * firmware supports ACX_PEER_CAP. Otherwise, there's nothing + * we can do, so use all supported_rates here. 
+ */ + cmd->sta.remote_rates = cpu_to_le32(supported_rates); wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " "basic_rate_set: 0x%x, remote_rates: 0x%x", @@ -482,6 +528,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto err_hlid; } + wlvif->sta.role_chan_type = wlvif->channel_type; goto out_free; err_hlid: @@ -500,7 +547,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl12xx_cmd_role_stop *cmd; int ret; - bool timeout = false; if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)) return -EINVAL; @@ -523,17 +569,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) goto out_free; } - /* - * Sometimes the firmware doesn't send this event, so we just - * time out without failing. Queue recovery for other - * failures. - */ - ret = wl1271_cmd_wait_for_event_or_timeout(wl, - ROLE_STOP_COMPLETE_EVENT_ID, - &timeout); - if (ret) - wl12xx_queue_recovery_work(wl); - wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); out_free: @@ -579,12 +614,15 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) cmd->ap.bss_index = WL1271_AP_BSS_INDEX; cmd->ap.global_hlid = wlvif->ap.global_hlid; cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid; + cmd->ap.global_session_id = wl->session_ids[wlvif->ap.global_hlid]; + cmd->ap.bcast_session_id = wl->session_ids[wlvif->ap.bcast_hlid]; cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int); cmd->ap.dtim_interval = bss_conf->dtim_period; cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP; /* FIXME: Change when adding DFS */ cmd->ap.reset_tsf = 1; /* By default reset AP TSF */ + cmd->ap.wmm = wlvif->wmm_enabled; cmd->channel = wlvif->channel; cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type); @@ -599,8 +637,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len); } - supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES | + supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES | wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif); + if (wlvif->p2p) + supported_rates &= ~CONF_TX_CCK_RATES; wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x", supported_rates); @@ -799,8 +839,11 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len) * @id: acx id * @buf: buffer containing acx, including all headers, must work with dma * @len: length of buf + * @valid_rets: bitmap of valid cmd status codes (i.e. return values). + * return the cmd status on success. */ -int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len) +int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf, + size_t len, unsigned long valid_rets) { struct acx_header *acx = buf; int ret; @@ -812,12 +855,26 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len) /* payload length, does not include any headers */ acx->len = cpu_to_le16(len - sizeof(*acx)); - ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0); + ret = wlcore_cmd_send_failsafe(wl, CMD_CONFIGURE, acx, len, 0, + valid_rets); if (ret < 0) { wl1271_warning("CONFIGURE command NOK"); return ret; } + return ret; +} + +/* + * wrapper for wlcore_cmd_configure that accepts only success status. 
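Both role-start paths above (STA and AP) now derive the local rate set the same way: the shared CONF_TX_ENABLED_RATES plus the MCS rates plus the per-band HW mask, with the CCK (11b) rates stripped for P2P interfaces. Schematically, with the masks passed in as parameters:

    /* rate-mask composition used by role_start_sta/role_start_ap above;
     * the arguments are bit masks of CONF_HW_BIT_RATE_* values */
    static unsigned long role_supported_rates(unsigned long enabled_rates,
                                              unsigned long mcs_rates,
                                              unsigned long band_hw_mask,
                                              unsigned long cck_rates,
                                              int is_p2p)
    {
            unsigned long rates = enabled_rates | mcs_rates | band_hw_mask;

            /* P2P forbids the 11b (CCK) rates */
            if (is_p2p)
                    rates &= ~cck_rates;

            return rates;
    }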
+ * return 0 on success + */ +int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len) +{ + int ret = wlcore_cmd_configure_failsafe(wl, id, buf, len, 0); + + if (ret < 0) + return ret; return 0; } EXPORT_SYMBOL_GPL(wl1271_cmd_configure); @@ -1034,8 +1091,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct sk_buff *skb; int ret; u32 rate; - u16 template_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4; - u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5; + u16 template_id_2_4 = wl->scan_templ_id_2_4; + u16 template_id_5 = wl->scan_templ_id_5; skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len, ie_len); @@ -1048,10 +1105,10 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len); - if (!sched_scan && + if (sched_scan && (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) { - template_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4; - template_id_5 = CMD_TEMPL_APP_PROBE_REQ_5; + template_id_2_4 = wl->sched_scan_templ_id_2_4; + template_id_5 = wl->sched_scan_templ_id_5; } rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); @@ -1068,6 +1125,7 @@ out: dev_kfree_skb(skb); return ret; } +EXPORT_SYMBOL_GPL(wl12xx_cmd_build_probe_req); struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, @@ -1379,7 +1437,8 @@ out: return ret; } -int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid) +int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid) { struct wl12xx_cmd_set_peer_state *cmd; int ret = 0; @@ -1395,6 +1454,10 @@ int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid) cmd->hlid = hlid; cmd->state = WL1271_CMD_STA_STATE_CONNECTED; + /* wmm param is valid only for station role */ + if (wlvif->bss_type == BSS_TYPE_STA_BSS) + cmd->wmm = wlvif->wmm_enabled; + ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0); if (ret < 0) { wl1271_error("failed to send set peer state command"); @@ -1429,6 +1492,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, cmd->hlid = hlid; cmd->sp_len = sta->max_sp; cmd->wmm = sta->wme ? 1 : 0; + cmd->session_id = wl->session_ids[hlid]; for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++) if (sta->wme && (sta->uapsd_queues & BIT(i))) @@ -1490,9 +1554,10 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid) goto out_free; } - ret = wl1271_cmd_wait_for_event_or_timeout(wl, - PEER_REMOVE_COMPLETE_EVENT_ID, - &timeout); + ret = wl->ops->wait_for_event(wl, + WLCORE_EVENT_PEER_REMOVE_COMPLETE, + &timeout); + /* * We are ok with a timeout here. The event is sometimes not sent * due to a firmware bug. 
In case of another error (like SDIO timeout) @@ -1508,6 +1573,131 @@ out: return ret; } +static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch) +{ + int idx = -1; + + switch (band) { + case IEEE80211_BAND_5GHZ: + if (ch >= 8 && ch <= 16) + idx = ((ch-8)/4 + 18); + else if (ch >= 34 && ch <= 64) + idx = ((ch-34)/2 + 3 + 18); + else if (ch >= 100 && ch <= 140) + idx = ((ch-100)/4 + 15 + 18); + else if (ch >= 149 && ch <= 165) + idx = ((ch-149)/4 + 26 + 18); + else + idx = -1; + break; + case IEEE80211_BAND_2GHZ: + if (ch >= 1 && ch <= 14) + idx = ch - 1; + else + idx = -1; + break; + default: + wl1271_error("get reg conf ch idx - unknown band: %d", + (int)band); + } + + return idx; +} + +void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, + enum ieee80211_band band) +{ + int ch_bit_idx = 0; + + if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF)) + return; + + ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel); + + if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS) + set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending); +} + +int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl) +{ + struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL; + int ret = 0, i, b, ch_bit_idx; + struct ieee80211_channel *channel; + u32 tmp_ch_bitmap[2]; + u16 ch; + struct wiphy *wiphy = wl->hw->wiphy; + struct ieee80211_supported_band *band; + bool timeout = false; + + if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF)) + return 0; + + wl1271_debug(DEBUG_CMD, "cmd reg domain config"); + + memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap)); + + for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) { + band = wiphy->bands[b]; + for (i = 0; i < band->n_channels; i++) { + channel = &band->channels[i]; + ch = channel->hw_value; + + if (channel->flags & (IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_RADAR | + IEEE80211_CHAN_PASSIVE_SCAN)) + continue; + + ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch); + if (ch_bit_idx < 0) + continue; + + set_bit(ch_bit_idx, (long *)tmp_ch_bitmap); + } + } + + tmp_ch_bitmap[0] |= wl->reg_ch_conf_pending[0]; + tmp_ch_bitmap[1] |= wl->reg_ch_conf_pending[1]; + + if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap))) + goto out; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]); + cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]); + + wl1271_debug(DEBUG_CMD, + "cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x", + cmd->ch_bit_map1, cmd->ch_bit_map2); + + ret = wl1271_cmd_send(wl, CMD_DFS_CHANNEL_CONFIG, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send reg domain dfs config"); + goto out; + } + + ret = wl->ops->wait_for_event(wl, + WLCORE_EVENT_DFS_CONFIG_COMPLETE, + &timeout); + if (ret < 0 || timeout) { + wl1271_error("reg domain conf %serror", + timeout ? "completion " : ""); + ret = timeout ? 
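wlcore_get_reg_conf_ch_idx() above packs every supported channel into the two-word regdomain bitmap: 2.4GHz channels 1-14 occupy bits 0-13 and the four 5GHz ranges are folded into the bits from 18 upward. A standalone copy of the mapping with a couple of spot checks:

    #include <assert.h>

    enum band { BAND_2GHZ, BAND_5GHZ };

    /* returns the bit index for a channel, or -1 if it has no slot */
    static int reg_conf_ch_idx(enum band band, unsigned ch)
    {
            if (band == BAND_2GHZ)
                    return (ch >= 1 && ch <= 14) ? (int)(ch - 1) : -1;

            /* 5GHz: four disjoint channel ranges packed after the 2.4GHz bits */
            if (ch >= 8 && ch <= 16)
                    return (ch - 8) / 4 + 18;
            if (ch >= 34 && ch <= 64)
                    return (ch - 34) / 2 + 3 + 18;
            if (ch >= 100 && ch <= 140)
                    return (ch - 100) / 4 + 15 + 18;
            if (ch >= 149 && ch <= 165)
                    return (ch - 149) / 4 + 26 + 18;
            return -1;
    }

    int main(void)
    {
            assert(reg_conf_ch_idx(BAND_2GHZ, 1) == 0);
            assert(reg_conf_ch_idx(BAND_2GHZ, 11) == 10);
            assert(reg_conf_ch_idx(BAND_5GHZ, 36) == 22);   /* (36-34)/2 + 21 */
            assert(reg_conf_ch_idx(BAND_5GHZ, 149) == 44);  /* (149-149)/4 + 44 */
            return 0;
    }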
-ETIMEDOUT : ret; + goto out; + } + + memcpy(wl->reg_ch_conf_last, tmp_ch_bitmap, sizeof(tmp_ch_bitmap)); + memset(wl->reg_ch_conf_pending, 0, sizeof(wl->reg_ch_conf_pending)); + +out: + kfree(cmd); + return ret; +} + int wl12xx_cmd_config_fwlog(struct wl1271 *wl) { struct wl12xx_cmd_config_fwlog *cmd; @@ -1593,12 +1783,12 @@ out: } static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, - u8 role_id) + u8 role_id, enum ieee80211_band band, u8 channel) { struct wl12xx_cmd_roc *cmd; int ret = 0; - wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id); + wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", channel, role_id); if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID)) return -EINVAL; @@ -1610,8 +1800,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, } cmd->role_id = role_id; - cmd->channel = wlvif->channel; - switch (wlvif->band) { + cmd->channel = channel; + switch (band) { case IEEE80211_BAND_2GHZ: cmd->band = WLCORE_BAND_2_4GHZ; break; @@ -1666,30 +1856,18 @@ out: return ret; } -int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id) +int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id, + enum ieee80211_band band, u8 channel) { int ret = 0; - bool is_first_roc; if (WARN_ON(test_bit(role_id, wl->roc_map))) return 0; - is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >= - WL12XX_MAX_ROLES); - - ret = wl12xx_cmd_roc(wl, wlvif, role_id); + ret = wl12xx_cmd_roc(wl, wlvif, role_id, band, channel); if (ret < 0) goto out; - if (is_first_roc) { - ret = wl1271_cmd_wait_for_event(wl, - REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID); - if (ret < 0) { - wl1271_error("cmd roc event completion error"); - goto out; - } - } - __set_bit(role_id, wl->roc_map); out: return ret; @@ -1719,43 +1897,7 @@ out: return ret; } -int wl12xx_cmd_channel_switch(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - struct ieee80211_channel_switch *ch_switch) -{ - struct wl12xx_cmd_channel_switch *cmd; - int ret; - - wl1271_debug(DEBUG_ACX, "cmd channel switch"); - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) { - ret = -ENOMEM; - goto out; - } - - cmd->role_id = wlvif->role_id; - cmd->channel = ch_switch->channel->hw_value; - cmd->switch_time = ch_switch->count; - cmd->stop_tx = ch_switch->block_tx; - - /* FIXME: control from mac80211 in the future */ - cmd->post_switch_tx_disable = 0; /* Enable TX on the target channel */ - - ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0); - if (ret < 0) { - wl1271_error("failed to send channel switch command"); - goto out_free; - } - -out_free: - kfree(cmd); - -out: - return ret; -} - -int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl) +int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl12xx_cmd_stop_channel_switch *cmd; int ret; @@ -1768,6 +1910,8 @@ int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl) goto out; } + cmd->role_id = wlvif->role_id; + ret = wl1271_cmd_send(wl, CMD_STOP_CHANNEL_SWICTH, cmd, sizeof(*cmd), 0); if (ret < 0) { wl1271_error("failed to stop channel switch command"); @@ -1782,7 +1926,8 @@ out: } /* start dev role and roc on its channel */ -int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum ieee80211_band band, int channel) { int ret; @@ -1797,11 +1942,11 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (ret < 0) goto out; - ret = wl12xx_cmd_role_start_dev(wl, wlvif); + ret = 
wl12xx_cmd_role_start_dev(wl, wlvif, band, channel); if (ret < 0) goto out_disable; - ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id); + ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id, band, channel); if (ret < 0) goto out_stop; diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index 2409f3d71f63..fd34123047cd 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -31,6 +31,8 @@ struct acx_header; int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, size_t res_len); +int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len, + size_t res_len, unsigned long valid_rets); int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, u8 *role_id); int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); @@ -39,11 +41,14 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif); -int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum ieee80211_band band, int channel); int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); +int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf, + size_t len, unsigned long valid_rets); int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 ps_mode, u16 auto_ps_timeout); @@ -75,22 +80,30 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, u16 tx_seq_16); -int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid); -int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id); +int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid); +int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id, + enum ieee80211_band band, u8 channel); int wl12xx_croc(struct wl1271 *wl, u8 role_id); int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct ieee80211_sta *sta, u8 hlid); int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid); +void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, + enum ieee80211_band band); +int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl); int wl12xx_cmd_config_fwlog(struct wl1271 *wl); int wl12xx_cmd_start_fwlog(struct wl1271 *wl); int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); int wl12xx_cmd_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct ieee80211_channel_switch *ch_switch); -int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl); +int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, + struct wl12xx_vif *wlvif); int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid); void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid); +int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, + u32 mask, bool *timeout); enum wl1271_commands { CMD_INTERROGATE = 1, /* use this to read information elements */ @@ -149,8 
+162,11 @@ enum wl1271_commands { CMD_WFD_START_DISCOVERY = 45, CMD_WFD_STOP_DISCOVERY = 46, CMD_WFD_ATTRIBUTE_CONFIG = 47, - CMD_NOP = 48, - CMD_LAST_COMMAND, + CMD_GENERIC_CFG = 48, + CMD_NOP = 49, + + /* start of 18xx specific commands */ + CMD_DFS_CHANNEL_CONFIG = 60, MAX_COMMAND_ID = 0xFFFF, }; @@ -167,8 +183,8 @@ enum cmd_templ { CMD_TEMPL_PS_POLL, CMD_TEMPL_KLV, CMD_TEMPL_DISCONNECT, - CMD_TEMPL_APP_PROBE_REQ_2_4, - CMD_TEMPL_APP_PROBE_REQ_5, + CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY, + CMD_TEMPL_APP_PROBE_REQ_5_LEGACY, CMD_TEMPL_BAR, /* for firmware internal use only */ CMD_TEMPL_CTS, /* * For CTS-to-self (FastCTS) mechanism @@ -179,6 +195,8 @@ enum cmd_templ { CMD_TEMPL_DEAUTH_AP, CMD_TEMPL_TEMPORARY, CMD_TEMPL_LINK_MEASUREMENT_REPORT, + CMD_TEMPL_PROBE_REQ_2_4_PERIODIC, + CMD_TEMPL_PROBE_REQ_5_PERIODIC, CMD_TEMPL_MAX = 0xff }; @@ -220,7 +238,8 @@ enum { CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/ CMD_STATUS_TEMPLATE_OOM = 23, CMD_STATUS_NO_RX_BA_SESSION = 24, - MAX_COMMAND_STATUS = 0xff + + MAX_COMMAND_STATUS }; #define CMDMBOX_HEADER_LEN 4 @@ -345,7 +364,15 @@ struct wl12xx_cmd_role_start { u8 reset_tsf; - u8 padding_1[4]; + /* + * ap supports wmm (note that there is additional + * per-sta wmm configuration) + */ + u8 wmm; + + u8 bcast_session_id; + u8 global_session_id; + u8 padding_1[1]; } __packed ap; }; } __packed; @@ -515,7 +542,14 @@ struct wl12xx_cmd_set_peer_state { u8 hlid; u8 state; - u8 padding[2]; + + /* + * wmm is relevant for sta role only. + * ap role configures the per-sta wmm params in + * the add_peer command. + */ + u8 wmm; + u8 padding[1]; } __packed; struct wl12xx_cmd_roc { @@ -558,7 +592,7 @@ struct wl12xx_cmd_add_peer { u8 bss_index; u8 sp_len; u8 wmm; - u8 padding1; + u8 session_id; } __packed; struct wl12xx_cmd_remove_peer { @@ -597,6 +631,13 @@ enum wl12xx_fwlogger_output { WL12XX_FWLOG_OUTPUT_HOST, }; +struct wl12xx_cmd_regdomain_dfs_config { + struct wl1271_cmd_header header; + + __le32 ch_bit_map1; + __le32 ch_bit_map2; +} __packed; + struct wl12xx_cmd_config_fwlog { struct wl1271_cmd_header header; @@ -626,27 +667,13 @@ struct wl12xx_cmd_stop_fwlog { struct wl1271_cmd_header header; } __packed; -struct wl12xx_cmd_channel_switch { +struct wl12xx_cmd_stop_channel_switch { struct wl1271_cmd_header header; u8 role_id; - - /* The new serving channel */ - u8 channel; - /* Relative time of the serving channel switch in TBTT units */ - u8 switch_time; - /* Stop the role TX, should expect it after radar detection */ - u8 stop_tx; - /* The target channel tx status 1-stopped 0-open*/ - u8 post_switch_tx_disable; - u8 padding[3]; } __packed; -struct wl12xx_cmd_stop_channel_switch { - struct wl1271_cmd_header header; -} __packed; - /* Used to check radio status after calibration */ #define MAX_TLV_LENGTH 500 #define TEST_CMD_P2G_CAL 2 /* TX BiP */ diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h index 9e40760bafe1..2b96ff821341 100644 --- a/drivers/net/wireless/ti/wlcore/conf.h +++ b/drivers/net/wireless/ti/wlcore/conf.h @@ -57,20 +57,49 @@ enum { }; enum { - CONF_HW_RATE_INDEX_1MBPS = 0, - CONF_HW_RATE_INDEX_2MBPS = 1, - CONF_HW_RATE_INDEX_5_5MBPS = 2, - CONF_HW_RATE_INDEX_6MBPS = 3, - CONF_HW_RATE_INDEX_9MBPS = 4, - CONF_HW_RATE_INDEX_11MBPS = 5, - CONF_HW_RATE_INDEX_12MBPS = 6, - CONF_HW_RATE_INDEX_18MBPS = 7, - CONF_HW_RATE_INDEX_22MBPS = 8, - CONF_HW_RATE_INDEX_24MBPS = 9, - CONF_HW_RATE_INDEX_36MBPS = 10, - CONF_HW_RATE_INDEX_48MBPS = 11, - CONF_HW_RATE_INDEX_54MBPS = 12, - CONF_HW_RATE_INDEX_MAX = 
CONF_HW_RATE_INDEX_54MBPS, + CONF_HW_RATE_INDEX_1MBPS = 0, + CONF_HW_RATE_INDEX_2MBPS = 1, + CONF_HW_RATE_INDEX_5_5MBPS = 2, + CONF_HW_RATE_INDEX_11MBPS = 3, + CONF_HW_RATE_INDEX_6MBPS = 4, + CONF_HW_RATE_INDEX_9MBPS = 5, + CONF_HW_RATE_INDEX_12MBPS = 6, + CONF_HW_RATE_INDEX_18MBPS = 7, + CONF_HW_RATE_INDEX_24MBPS = 8, + CONF_HW_RATE_INDEX_36MBPS = 9, + CONF_HW_RATE_INDEX_48MBPS = 10, + CONF_HW_RATE_INDEX_54MBPS = 11, + CONF_HW_RATE_INDEX_MCS0 = 12, + CONF_HW_RATE_INDEX_MCS1 = 13, + CONF_HW_RATE_INDEX_MCS2 = 14, + CONF_HW_RATE_INDEX_MCS3 = 15, + CONF_HW_RATE_INDEX_MCS4 = 16, + CONF_HW_RATE_INDEX_MCS5 = 17, + CONF_HW_RATE_INDEX_MCS6 = 18, + CONF_HW_RATE_INDEX_MCS7 = 19, + CONF_HW_RATE_INDEX_MCS7_SGI = 20, + CONF_HW_RATE_INDEX_MCS0_40MHZ = 21, + CONF_HW_RATE_INDEX_MCS1_40MHZ = 22, + CONF_HW_RATE_INDEX_MCS2_40MHZ = 23, + CONF_HW_RATE_INDEX_MCS3_40MHZ = 24, + CONF_HW_RATE_INDEX_MCS4_40MHZ = 25, + CONF_HW_RATE_INDEX_MCS5_40MHZ = 26, + CONF_HW_RATE_INDEX_MCS6_40MHZ = 27, + CONF_HW_RATE_INDEX_MCS7_40MHZ = 28, + CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI = 29, + + /* MCS8+ rates overlap with 40Mhz rates */ + CONF_HW_RATE_INDEX_MCS8 = 21, + CONF_HW_RATE_INDEX_MCS9 = 22, + CONF_HW_RATE_INDEX_MCS10 = 23, + CONF_HW_RATE_INDEX_MCS11 = 24, + CONF_HW_RATE_INDEX_MCS12 = 25, + CONF_HW_RATE_INDEX_MCS13 = 26, + CONF_HW_RATE_INDEX_MCS14 = 27, + CONF_HW_RATE_INDEX_MCS15 = 28, + CONF_HW_RATE_INDEX_MCS15_SGI = 29, + + CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI, }; #define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff @@ -415,11 +444,11 @@ struct conf_rx_settings { #define CONF_TX_RATE_MASK_BASIC_P2P CONF_HW_BIT_RATE_6MBPS /* - * Rates supported for data packets when operating as AP. Note the absence + * Rates supported for data packets when operating as STA/AP. Note the absence * of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop * one. The rate dropped is not mandatory under any operating mode. */ -#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \ +#define CONF_TX_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \ CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \ CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \ @@ -677,6 +706,18 @@ struct conf_tx_settings { /* Time in ms for Tx watchdog timer to expire */ u32 tx_watchdog_timeout; + + /* + * when a slow link has this much packets pending, it becomes a low + * priority link, scheduling-wise + */ + u8 slow_link_thold; + + /* + * when a fast link has this much packets pending, it becomes a low + * priority link, scheduling-wise + */ + u8 fast_link_thold; } __packed; enum { @@ -1047,6 +1088,7 @@ struct conf_roam_trigger_settings { struct conf_scan_settings { /* * The minimum time to wait on each channel for active scans + * This value will be used whenever there's a connected interface. * * Range: u32 tu/1000 */ @@ -1054,24 +1096,37 @@ struct conf_scan_settings { /* * The maximum time to wait on each channel for active scans + * This value will be currently used whenever there's a + * connected interface. It shouldn't exceed 30000 (~30ms) to avoid + * possible interference of voip traffic going on while scanning. * * Range: u32 tu/1000 */ u32 max_dwell_time_active; - /* - * The minimum time to wait on each channel for passive scans + /* The minimum time to wait on each channel for active scans + * when it's possible to have longer scan dwell times. + * Currently this is used whenever we're idle on all interfaces. 
+ * Longer dwell times improve detection of networks within a + * single scan. * * Range: u32 tu/1000 */ - u32 min_dwell_time_passive; + u32 min_dwell_time_active_long; - /* - * The maximum time to wait on each channel for passive scans + /* The maximum time to wait on each channel for active scans + * when it's possible to have longer scan dwell times. + * See min_dwell_time_active_long * * Range: u32 tu/1000 */ - u32 max_dwell_time_passive; + u32 max_dwell_time_active_long; + + /* time to wait on the channel for passive scans (in TU/1000) */ + u32 dwell_time_passive; + + /* time to wait on the channel for DFS scans (in TU/1000) */ + u32 dwell_time_dfs; /* * Number of probe requests to transmit on each active scan channel @@ -1276,12 +1331,20 @@ struct conf_hangover_settings { u8 window_size; } __packed; +struct conf_recovery_settings { + /* BUG() on fw recovery */ + u8 bug_on_recovery; + + /* Prevent HW recovery. FW will remain stuck. */ + u8 no_recovery; +} __packed; + /* * The conf version consists of 4 bytes. The two MSB are the wlcore * version, the two LSB are the lower driver's private conf * version. */ -#define WLCORE_CONF_VERSION (0x0002 << 16) +#define WLCORE_CONF_VERSION (0x0005 << 16) #define WLCORE_CONF_MASK 0xffff0000 #define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \ sizeof(struct wlcore_conf)) @@ -1309,6 +1372,7 @@ struct wlcore_conf { struct conf_fwlog fwlog; struct conf_rate_policy_settings rate; struct conf_hangover_settings hangover; + struct conf_recovery_settings recovery; } __packed; struct wlcore_conf_file { diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index c86bb00c2488..e70a7c864865 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -490,7 +490,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, DRIVER_STATE_PRINT_HEX(chip.id); DRIVER_STATE_PRINT_STR(chip.fw_ver_str); DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str); - DRIVER_STATE_PRINT_INT(sched_scanning); + DRIVER_STATE_PRINT_INT(recovery_count); #undef DRIVER_STATE_PRINT_INT #undef DRIVER_STATE_PRINT_LONG @@ -560,7 +560,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf, if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { VIF_STATE_PRINT_INT(sta.hlid); - VIF_STATE_PRINT_INT(sta.ba_rx_bitmap); VIF_STATE_PRINT_INT(sta.basic_rate_idx); VIF_STATE_PRINT_INT(sta.ap_rate_idx); VIF_STATE_PRINT_INT(sta.p2p_rate_idx); @@ -577,6 +576,10 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf, VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]); } VIF_STATE_PRINT_INT(last_tx_hlid); + VIF_STATE_PRINT_INT(tx_queue_count[0]); + VIF_STATE_PRINT_INT(tx_queue_count[1]); + VIF_STATE_PRINT_INT(tx_queue_count[2]); + VIF_STATE_PRINT_INT(tx_queue_count[3]); VIF_STATE_PRINT_LHEX(links_map[0]); VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len); VIF_STATE_PRINT_INT(band); @@ -589,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf, VIF_STATE_PRINT_INT(beacon_int); VIF_STATE_PRINT_INT(default_key); VIF_STATE_PRINT_INT(aid); - VIF_STATE_PRINT_INT(session_counter); VIF_STATE_PRINT_INT(psm_entry_retry); VIF_STATE_PRINT_INT(power_level); VIF_STATE_PRINT_INT(rssi_thold); @@ -993,7 +995,7 @@ static ssize_t sleep_auth_write(struct file *file, return -EINVAL; } - if (value < 0 || value > WL1271_PSM_MAX) { + if (value > WL1271_PSM_MAX) { wl1271_warning("sleep_auth must be between 0 and %d", WL1271_PSM_MAX); return 
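The reworked conf_scan_settings above splits the active dwell times into a short pair (used while any interface is connected, to limit interference with ongoing traffic) and a long pair (used when all interfaces are idle, for better detection), plus fixed passive and DFS dwell times. The actual selection happens in wlcore's channel-parameter code, which is not part of this hunk; the helper below is only a hypothetical illustration of the intent described in the comments:

    #include <stdbool.h>
    #include <stdint.h>

    /* subset of conf_scan_settings; values in TU/1000, as in the driver */
    struct scan_dwell_conf {
            uint32_t min_dwell_time_active;
            uint32_t max_dwell_time_active;
            uint32_t min_dwell_time_active_long;
            uint32_t max_dwell_time_active_long;
            uint32_t dwell_time_passive;
            uint32_t dwell_time_dfs;
    };

    struct dwell { uint32_t min, max; };

    /* hypothetical helper: pick the dwell window for one scan channel */
    static struct dwell pick_dwell(const struct scan_dwell_conf *c,
                                   bool passive, bool dfs,
                                   bool any_iface_connected)
    {
            struct dwell d;

            if (dfs) {
                    d.min = d.max = c->dwell_time_dfs;
            } else if (passive) {
                    d.min = d.max = c->dwell_time_passive;
            } else if (any_iface_connected) {
                    /* keep dwell short so ongoing (e.g. VoIP) traffic isn't hurt */
                    d.min = c->min_dwell_time_active;
                    d.max = c->max_dwell_time_active;
            } else {
                    /* idle: longer dwell improves detection in a single scan */
                    d.min = c->min_dwell_time_active_long;
                    d.max = c->max_dwell_time_active_long;
            }
            return d;
    }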
-ERANGE; diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index 48907054d493..70f289aa1bc6 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -29,34 +29,39 @@ #include "scan.h" #include "wl12xx_80211.h" -static void wl1271_event_rssi_trigger(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - struct event_mailbox *mbox) +void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; enum nl80211_cqm_rssi_threshold_event event; - s8 metric = mbox->rssi_snr_trigger_metric[0]; + s8 metric = metric_arr[0]; wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric); - if (metric <= wlvif->rssi_thold) - event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; - else - event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; - - if (event != wlvif->last_rssi_event) - ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL); - wlvif->last_rssi_event = event; + /* TODO: check actual multi-role support */ + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (metric <= wlvif->rssi_thold) + event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; + else + event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; + + vif = wl12xx_wlvif_to_vif(wlvif); + if (event != wlvif->last_rssi_event) + ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL); + wlvif->last_rssi_event = event; + } } +EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger); static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); if (wlvif->bss_type != BSS_TYPE_AP_BSS) { - if (!wlvif->sta.ba_rx_bitmap) + u8 hlid = wlvif->sta.hlid; + if (!wl->links[hlid].ba_bitmap) return; - ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap, + ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap, vif->bss_conf.bssid); } else { u8 hlid; @@ -74,8 +79,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif) } } -static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl, - u8 enable) +void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable) { struct wl12xx_vif *wlvif; @@ -87,201 +91,169 @@ static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl, wl1271_recalc_rx_streaming(wl, wlvif); } } - } +EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense); -static void wl1271_event_mbox_dump(struct event_mailbox *mbox) +void wlcore_event_sched_scan_completed(struct wl1271 *wl, + u8 status) { - wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); - wl1271_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector); - wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); + wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)", + status); + + if (wl->sched_vif) { + ieee80211_sched_scan_stopped(wl->hw); + wl->sched_vif = NULL; + } } +EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed); -static int wl1271_event_process(struct wl1271 *wl) +void wlcore_event_ba_rx_constraint(struct wl1271 *wl, + unsigned long roles_bitmap, + unsigned long allowed_bitmap) { - struct event_mailbox *mbox = wl->mbox; - struct ieee80211_vif *vif; struct wl12xx_vif *wlvif; - u32 vector; - bool disconnect_sta = false; - unsigned long sta_bitmap = 0; - int ret; - - wl1271_event_mbox_dump(mbox); - - vector = le32_to_cpu(mbox->events_vector); - vector &= ~(le32_to_cpu(mbox->events_mask)); - wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); - if (vector & SCAN_COMPLETE_EVENT_ID) { - wl1271_debug(DEBUG_EVENT, "status: 
0x%x", - mbox->scheduled_scan_status); - - wl1271_scan_stm(wl, wl->scan_vif); - } + wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx", + __func__, roles_bitmap, allowed_bitmap); - if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { - wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_REPORT_EVENT " - "(status 0x%0x)", mbox->scheduled_scan_status); + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->role_id == WL12XX_INVALID_ROLE_ID || + !test_bit(wlvif->role_id , &roles_bitmap)) + continue; - wl1271_scan_sched_scan_results(wl); + wlvif->ba_allowed = !!test_bit(wlvif->role_id, + &allowed_bitmap); + if (!wlvif->ba_allowed) + wl1271_stop_ba_event(wl, wlvif); } +} +EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint); - if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID) { - wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT " - "(status 0x%0x)", mbox->scheduled_scan_status); - if (wl->sched_scanning) { - ieee80211_sched_scan_stopped(wl->hw); - wl->sched_scanning = false; - } - } +void wlcore_event_channel_switch(struct wl1271 *wl, + unsigned long roles_bitmap, + bool success) +{ + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; - if (vector & SOFT_GEMINI_SENSE_EVENT_ID) - wl12xx_event_soft_gemini_sense(wl, - mbox->soft_gemini_sense_info); + wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d", + __func__, roles_bitmap, success); - /* - * We are HW_MONITOR device. On beacon loss - queue - * connection loss work. Cancel it on REGAINED event. - */ - if (vector & BSS_LOSE_EVENT_ID) { - /* TODO: check for multi-role */ - int delay = wl->conf.conn.synch_fail_thold * - wl->conf.conn.bss_lose_timeout; - wl1271_info("Beacon loss detected."); + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (wlvif->role_id == WL12XX_INVALID_ROLE_ID || + !test_bit(wlvif->role_id , &roles_bitmap)) + continue; - /* - * if the work is already queued, it should take place. We - * don't want to delay the connection loss indication - * any more. 
- */ - ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work, - msecs_to_jiffies(delay)); + if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, + &wlvif->flags)) + continue; - wl12xx_for_each_wlvif_sta(wl, wlvif) { - vif = wl12xx_wlvif_to_vif(wlvif); + vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_cqm_rssi_notify( - vif, - NL80211_CQM_RSSI_BEACON_LOSS_EVENT, - GFP_KERNEL); - } + ieee80211_chswitch_done(vif, success); + cancel_delayed_work(&wlvif->channel_switch_work); } +} +EXPORT_SYMBOL_GPL(wlcore_event_channel_switch); - if (vector & REGAINED_BSS_EVENT_ID) { - /* TODO: check for multi-role */ - wl1271_info("Beacon regained."); - cancel_delayed_work(&wl->connection_loss_work); - - /* sanity check - we can't lose and gain the beacon together */ - WARN(vector & BSS_LOSE_EVENT_ID, - "Concurrent beacon loss and gain from FW"); - } +void wlcore_event_dummy_packet(struct wl1271 *wl) +{ + wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); + wl1271_tx_dummy_packet(wl); +} +EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet); - if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { - /* TODO: check actual multi-role support */ - wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT"); - wl12xx_for_each_wlvif_sta(wl, wlvif) { - wl1271_event_rssi_trigger(wl, wlvif, mbox); +static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap) +{ + u32 num_packets = wl->conf.tx.max_tx_retries; + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + const u8 *addr; + int h; + + for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) { + bool found = false; + /* find the ap vif connected to this sta */ + wl12xx_for_each_wlvif_ap(wl, wlvif) { + if (!test_bit(h, wlvif->ap.sta_hlid_map)) + continue; + found = true; + break; } - } + if (!found) + continue; - if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) { - u8 role_id = mbox->role_id; - wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. " - "ba_allowed = 0x%x, role_id=%d", - mbox->rx_ba_allowed, role_id); + vif = wl12xx_wlvif_to_vif(wlvif); + addr = wl->links[h].addr; - wl12xx_for_each_wlvif(wl, wlvif) { - if (role_id != 0xff && role_id != wlvif->role_id) - continue; - - wlvif->ba_allowed = !!mbox->rx_ba_allowed; - if (!wlvif->ba_allowed) - wl1271_stop_ba_event(wl, wlvif); + rcu_read_lock(); + sta = ieee80211_find_sta(vif, addr); + if (sta) { + wl1271_debug(DEBUG_EVENT, "remove sta %d", h); + ieee80211_report_low_ack(sta, num_packets); } + rcu_read_unlock(); } +} - if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) { - wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. " - "status = 0x%x", - mbox->channel_switch_status); - /* - * That event uses for two cases: - * 1) channel switch complete with status=0 - * 2) channel switch failed status=1 - */ - - /* TODO: configure only the relevant vif */ - wl12xx_for_each_wlvif_sta(wl, wlvif) { - bool success; - - if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, - &wlvif->flags)) - continue; - - success = mbox->channel_switch_status ? 
false : true; - vif = wl12xx_wlvif_to_vif(wlvif); +void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap) +{ + wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID"); + wlcore_disconnect_sta(wl, sta_bitmap); +} +EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure); - ieee80211_chswitch_done(vif, success); - } - } +void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap) +{ + wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID"); + wlcore_disconnect_sta(wl, sta_bitmap); +} +EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta); - if ((vector & DUMMY_PACKET_EVENT_ID)) { - wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); - ret = wl1271_tx_dummy_packet(wl); - if (ret < 0) - return ret; - } +void wlcore_event_roc_complete(struct wl1271 *wl) +{ + wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID"); + if (wl->roc_vif) + ieee80211_ready_on_channel(wl->hw); +} +EXPORT_SYMBOL_GPL(wlcore_event_roc_complete); +void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap) +{ /* - * "TX retries exceeded" has a different meaning according to mode. - * In AP mode the offending station is disconnected. + * We are HW_MONITOR device. On beacon loss - queue + * connection loss work. Cancel it on REGAINED event. */ - if (vector & MAX_TX_RETRY_EVENT_ID) { - wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID"); - sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded); - disconnect_sta = true; - } + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + int delay = wl->conf.conn.synch_fail_thold * + wl->conf.conn.bss_lose_timeout; - if (vector & INACTIVE_STA_EVENT_ID) { - wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID"); - sta_bitmap |= le16_to_cpu(mbox->sta_aging_status); - disconnect_sta = true; - } + wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap); - if (disconnect_sta) { - u32 num_packets = wl->conf.tx.max_tx_retries; - struct ieee80211_sta *sta; - const u8 *addr; - int h; - - for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) { - bool found = false; - /* find the ap vif connected to this sta */ - wl12xx_for_each_wlvif_ap(wl, wlvif) { - if (!test_bit(h, wlvif->ap.sta_hlid_map)) - continue; - found = true; - break; - } - if (!found) - continue; + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (wlvif->role_id == WL12XX_INVALID_ROLE_ID || + !test_bit(wlvif->role_id , &roles_bitmap)) + continue; - vif = wl12xx_wlvif_to_vif(wlvif); - addr = wl->links[h].addr; + /* + * if the work is already queued, it should take place. + * We don't want to delay the connection loss + * indication any more. 
+ */ + ieee80211_queue_delayed_work(wl->hw, + &wlvif->connection_loss_work, + msecs_to_jiffies(delay)); - rcu_read_lock(); - sta = ieee80211_find_sta(vif, addr); - if (sta) { - wl1271_debug(DEBUG_EVENT, "remove sta %d", h); - ieee80211_report_low_ack(sta, num_packets); - } - rcu_read_unlock(); - } + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_cqm_rssi_notify( + vif, + NL80211_CQM_RSSI_BEACON_LOSS_EVENT, + GFP_KERNEL); } - return 0; } +EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss); int wl1271_event_unmask(struct wl1271 *wl) { @@ -305,12 +277,12 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) /* first we read the mbox descriptor */ ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox, - sizeof(*wl->mbox), false); + wl->mbox_size, false); if (ret < 0) return ret; /* process the descriptor */ - ret = wl1271_event_process(wl); + ret = wl->ops->process_mailbox_events(wl); if (ret < 0) return ret; diff --git a/drivers/net/wireless/ti/wlcore/event.h b/drivers/net/wireless/ti/wlcore/event.h index 8adf18d6c58f..acc7a59d3828 100644 --- a/drivers/net/wireless/ti/wlcore/event.h +++ b/drivers/net/wireless/ti/wlcore/event.h @@ -46,33 +46,17 @@ enum { RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5), RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6), RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7), - MEASUREMENT_START_EVENT_ID = BIT(8), - MEASUREMENT_COMPLETE_EVENT_ID = BIT(9), - SCAN_COMPLETE_EVENT_ID = BIT(10), - WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11), - AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12), - RESERVED1 = BIT(13), - PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14), - ROLE_STOP_COMPLETE_EVENT_ID = BIT(15), - RADAR_DETECTED_EVENT_ID = BIT(16), - CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17), - BSS_LOSE_EVENT_ID = BIT(18), - REGAINED_BSS_EVENT_ID = BIT(19), - MAX_TX_RETRY_EVENT_ID = BIT(20), - DUMMY_PACKET_EVENT_ID = BIT(21), - SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), - CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23), - SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), - PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25), - INACTIVE_STA_EVENT_ID = BIT(26), - PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27), - PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28), - PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29), - BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30), - REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31), + EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, }; +/* events the driver might want to wait for */ +enum wlcore_wait_event { + WLCORE_EVENT_ROLE_STOP_COMPLETE, + WLCORE_EVENT_PEER_REMOVE_COMPLETE, + WLCORE_EVENT_DFS_CONFIG_COMPLETE +}; + enum { EVENT_ENTER_POWER_SAVE_FAIL = 0, EVENT_ENTER_POWER_SAVE_SUCCESS, @@ -80,61 +64,24 @@ enum { #define NUM_OF_RSSI_SNR_TRIGGERS 8 -struct event_mailbox { - __le32 events_vector; - __le32 events_mask; - __le32 reserved_1; - __le32 reserved_2; - - u8 number_of_scan_results; - u8 scan_tag; - u8 completed_scan_status; - u8 reserved_3; - - u8 soft_gemini_sense_info; - u8 soft_gemini_protective_info; - s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; - u8 change_auto_mode_timeout; - u8 scheduled_scan_status; - u8 reserved4; - /* tuned channel (roc) */ - u8 roc_channel; - - __le16 hlid_removed_bitmap; - - /* bitmap of aged stations (by HLID) */ - __le16 sta_aging_status; - - /* bitmap of stations (by HLID) which exceeded max tx retries */ - __le16 sta_tx_retry_exceeded; - - /* discovery completed results */ - u8 discovery_tag; - u8 number_of_preq_results; - u8 number_of_prsp_results; - u8 reserved_5; - - /* rx ba constraint */ - u8 role_id; /* 0xFF means any role. 
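
With the mailbox read now sized by wl->mbox_size and parsing delegated to wl->ops->process_mailbox_events(), each lower driver decodes its own chip-specific event layout and forwards the decoded values to the exported wlcore_event_*() helpers above. A rough sketch of that shape follows; struct chip_event_mailbox, its fields and the event bit numbers are hypothetical placeholders, not the real wl12xx/wl18xx layout.

/* Illustrative sketch only -- all names below are hypothetical. */
struct chip_event_mailbox {
	__le32 events_vector;
	__le32 beacon_loss_roles;
	__le32 channel_switch_roles;
	u8 channel_switch_status;
	u8 padding[3];
} __packed;

static int chip_process_mailbox_events(struct wl1271 *wl)
{
	struct chip_event_mailbox *mbox = wl->mbox;
	u32 vector = le32_to_cpu(mbox->events_vector);

	if (vector & BIT(18))		/* hypothetical beacon-loss bit */
		wlcore_event_beacon_loss(wl,
				le32_to_cpu(mbox->beacon_loss_roles));

	if (vector & BIT(17))		/* hypothetical channel-switch bit */
		wlcore_event_channel_switch(wl,
				le32_to_cpu(mbox->channel_switch_roles),
				mbox->channel_switch_status == 0);

	return 0;
}
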
*/ - u8 rx_ba_allowed; - u8 reserved_6[2]; - - /* Channel switch results */ - - u8 channel_switch_role_id; - u8 channel_switch_status; - u8 reserved_7[2]; - - u8 ps_poll_delivery_failure_role_ids; - u8 stopped_role_ids; - u8 started_role_ids; - - u8 reserved_8[9]; -} __packed; - struct wl1271; int wl1271_event_unmask(struct wl1271 *wl); int wl1271_event_handle(struct wl1271 *wl, u8 mbox); +void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable); +void wlcore_event_sched_scan_completed(struct wl1271 *wl, + u8 status); +void wlcore_event_ba_rx_constraint(struct wl1271 *wl, + unsigned long roles_bitmap, + unsigned long allowed_bitmap); +void wlcore_event_channel_switch(struct wl1271 *wl, + unsigned long roles_bitmap, + bool success); +void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap); +void wlcore_event_dummy_packet(struct wl1271 *wl); +void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap); +void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap); +void wlcore_event_roc_complete(struct wl1271 *wl); +void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr); #endif diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h index 2673d783ec1e..7fd260c02a0a 100644 --- a/drivers/net/wireless/ti/wlcore/hw_ops.h +++ b/drivers/net/wireless/ti/wlcore/hw_ops.h @@ -201,4 +201,45 @@ wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len) return buf_offset; } +static inline void +wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, u32 changed) +{ + if (wl->ops->sta_rc_update) + wl->ops->sta_rc_update(wl, wlvif, sta, changed); +} + +static inline int +wlcore_hw_set_peer_cap(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid) +{ + if (wl->ops->set_peer_cap) + return wl->ops->set_peer_cap(wl, ht_cap, allow_ht_operation, + rate_set, hlid); + + return 0; +} + +static inline bool +wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + if (!wl->ops->lnk_high_prio) + BUG_ON(1); + + return wl->ops->lnk_high_prio(wl, hlid, lnk); +} + +static inline bool +wlcore_hw_lnk_low_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + if (!wl->ops->lnk_low_prio) + BUG_ON(1); + + return wl->ops->lnk_low_prio(wl, hlid, lnk); +} + #endif diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index 32d157f62f31..5c6f11e157d9 100644 --- a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -41,14 +41,14 @@ int wl1271_init_templates_config(struct wl1271 *wl) /* send empty templates for fw memory reservation */ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, - CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, + wl->scan_templ_id_2_4, NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0, WL1271_RATE_AUTOMATIC); if (ret < 0) return ret; ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, - CMD_TEMPL_CFG_PROBE_REQ_5, + wl->scan_templ_id_5, NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0, WL1271_RATE_AUTOMATIC); if (ret < 0) @@ -56,14 +56,16 @@ int wl1271_init_templates_config(struct wl1271 *wl) if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) { ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, - CMD_TEMPL_APP_PROBE_REQ_2_4, NULL, + wl->sched_scan_templ_id_2_4, + NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0, WL1271_RATE_AUTOMATIC); if (ret < 0) return ret; ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, - 
CMD_TEMPL_APP_PROBE_REQ_5, NULL, + wl->sched_scan_templ_id_5, + NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0, WL1271_RATE_AUTOMATIC); if (ret < 0) @@ -463,7 +465,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif) if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES)) supported_rates = CONF_TX_OFDM_RATES; else - supported_rates = CONF_TX_AP_ENABLED_RATES; + supported_rates = CONF_TX_ENABLED_RATES; /* unconditionally enable HT rates */ supported_rates |= CONF_TX_MCS_RATES; @@ -575,9 +577,6 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif) /* Configure for power according to debugfs */ if (sta_auth != WL1271_PSM_ILLEGAL) ret = wl1271_acx_sleep_auth(wl, sta_auth); - /* Configure for power always on */ - else if (wl->quirks & WLCORE_QUIRK_NO_ELP) - ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); /* Configure for ELP power saving */ else ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); @@ -679,6 +678,10 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) return ret; + ret = wlcore_cmd_regdomain_config_locked(wl); + if (ret < 0) + return ret; + /* Bluetooth WLAN coexistence */ ret = wl1271_init_pta(wl); if (ret < 0) diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h index f48530fec14f..af7d9f9b3b4d 100644 --- a/drivers/net/wireless/ti/wlcore/io.h +++ b/drivers/net/wireless/ti/wlcore/io.h @@ -105,13 +105,13 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr, { int ret; - ret = wlcore_raw_read(wl, addr, &wl->buffer_32, - sizeof(wl->buffer_32), false); + ret = wlcore_raw_read(wl, addr, wl->buffer_32, + sizeof(*wl->buffer_32), false); if (ret < 0) return ret; if (val) - *val = le32_to_cpu(wl->buffer_32); + *val = le32_to_cpu(*wl->buffer_32); return 0; } @@ -119,9 +119,9 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr, static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr, u32 val) { - wl->buffer_32 = cpu_to_le32(val); - return wlcore_raw_write(wl, addr, &wl->buffer_32, - sizeof(wl->buffer_32), false); + *wl->buffer_32 = cpu_to_le32(val); + return wlcore_raw_write(wl, addr, wl->buffer_32, + sizeof(*wl->buffer_32), false); } static inline int __must_check wlcore_read(struct wl1271 *wl, int addr, diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index ea9d8e011bc9..2c2ff3e1f849 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -56,8 +56,8 @@ #define WL1271_BOOT_RETRIES 3 static char *fwlog_param; -static bool bug_on_recovery; -static bool no_recovery; +static int bug_on_recovery = -1; +static int no_recovery = -1; static void __wl1271_op_remove_interface(struct wl1271 *wl, struct ieee80211_vif *vif, @@ -79,22 +79,22 @@ static int wl12xx_set_authorized(struct wl1271 *wl, if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags)) return 0; - ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid); + ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid); if (ret < 0) return ret; - wl12xx_croc(wl, wlvif->role_id); - wl1271_info("Association completed."); return 0; } -static int wl1271_reg_notify(struct wiphy *wiphy, - struct regulatory_request *request) +static void wl1271_reg_notify(struct wiphy *wiphy, + struct regulatory_request *request) { struct ieee80211_supported_band *band; struct ieee80211_channel *ch; int i; + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct wl1271 *wl = hw->priv; band = wiphy->bands[IEEE80211_BAND_5GHZ]; for (i 
= 0; i < band->n_channels; i++) { @@ -108,7 +108,8 @@ static int wl1271_reg_notify(struct wiphy *wiphy, } - return 0; + if (likely(wl->state == WLCORE_STATE_ON)) + wlcore_regdomain_config(wl); } static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, @@ -303,6 +304,7 @@ out: static void wlcore_adjust_conf(struct wl1271 *wl) { /* Adjust settings according to optional module parameters */ + if (fwlog_param) { if (!strcmp(fwlog_param, "continuous")) { wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; @@ -318,16 +320,22 @@ static void wlcore_adjust_conf(struct wl1271 *wl) wl1271_error("Unknown fwlog parameter %s", fwlog_param); } } + + if (bug_on_recovery != -1) + wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery; + + if (no_recovery != -1) + wl->conf.recovery.no_recovery = (u8) no_recovery; } static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid, u8 tx_pkts) { - bool fw_ps, single_sta; + bool fw_ps, single_link; fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); - single_sta = (wl->active_sta_count == 1); + single_link = (wl->active_link_count == 1); /* * Wake up from high level PS if the STA is asleep with too little @@ -338,10 +346,10 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, /* * Start high-level PS if the STA is asleep with enough blocks in FW. - * Make an exception if this is the only connected station. In this - * case FW-memory congestion is not a problem. + * Make an exception if this is the only connected link. In this + * case FW-memory congestion is less of a problem. */ - else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) + else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) wl12xx_ps_link_start(wl, wlvif, hlid, true); } @@ -349,11 +357,8 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct wl_fw_status_2 *status) { - struct wl1271_link *lnk; u32 cur_fw_ps_map; - u8 hlid, cnt; - - /* TODO: also use link_fast_bitmap here */ + u8 hlid; cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap); if (wl->ap_fw_ps_map != cur_fw_ps_map) { @@ -365,17 +370,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, wl->ap_fw_ps_map = cur_fw_ps_map; } - for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { - lnk = &wl->links[hlid]; - cnt = status->counters.tx_lnk_free_pkts[hlid] - - lnk->prev_freed_pkts; - - lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid]; - lnk->allocated_pkts -= cnt; - + for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, - lnk->allocated_pkts); - } + wl->links[hlid].allocated_pkts); } static int wlcore_fw_status(struct wl1271 *wl, @@ -389,6 +386,7 @@ static int wlcore_fw_status(struct wl1271 *wl, int i; size_t status_len; int ret; + struct wl1271_link *lnk; status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) + sizeof(*status_2) + wl->fw_status_priv_len; @@ -414,6 +412,17 @@ static int wlcore_fw_status(struct wl1271 *wl, wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i]; } + + for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) { + lnk = &wl->links[i]; + /* prevent wrap-around in freed-packets counter */ + lnk->allocated_pkts -= + (status_2->counters.tx_lnk_free_pkts[i] - + lnk->prev_freed_pkts) & 0xff; + + lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i]; + } + /* prevent wrap-around in total blocks counter */ if (likely(wl->tx_blocks_freed <= 
le32_to_cpu(status_2->total_released_blks))) @@ -466,6 +475,8 @@ static int wlcore_fw_status(struct wl1271 *wl, wl->time_offset = (timespec_to_ns(&ts) >> 10) - (s64)le32_to_cpu(status_2->fw_localtime); + wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap); + return 0; } @@ -802,11 +813,13 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl) /* * Make sure the chip is awake and the logger isn't active. - * Do not send a stop fwlog command if the fw is hanged. + * Do not send a stop fwlog command if the fw is hanged or if + * dbgpins are used (due to some fw bug). */ if (wl1271_ps_elp_wakeup(wl)) goto out; - if (!wl->watchdog_recovery) + if (!wl->watchdog_recovery && + wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS) wl12xx_cmd_stop_fwlog(wl); /* Read the first memory block address */ @@ -874,7 +887,8 @@ static void wlcore_print_recovery(struct wl1271 *wl) if (ret < 0) return; - wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts); + wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d", + pc, hint_sts, ++wl->recovery_count); wlcore_set_partition(wl, &wl->ptable[PART_WORK]); } @@ -897,10 +911,10 @@ static void wl1271_recovery_work(struct work_struct *work) wlcore_print_recovery(wl); } - BUG_ON(bug_on_recovery && + BUG_ON(wl->conf.recovery.bug_on_recovery && !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); - if (no_recovery) { + if (wl->conf.recovery.no_recovery) { wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); goto out_unlock; } @@ -920,11 +934,6 @@ static void wl1271_recovery_work(struct work_struct *work) /* Prevent spurious TX during FW restart */ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); - if (wl->sched_scanning) { - ieee80211_sched_scan_stopped(wl->hw); - wl->sched_scanning = false; - } - /* reboot the chipset */ while (!list_empty(&wl->wlvif_list)) { wlvif = list_first_entry(&wl->wlvif_list, @@ -1141,7 +1150,6 @@ int wl1271_plt_stop(struct wl1271 *wl) cancel_work_sync(&wl->recovery_work); cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); - cancel_delayed_work_sync(&wl->connection_loss_work); mutex_lock(&wl->mutex); wl1271_power_off(wl); @@ -1169,9 +1177,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, int q, mapping; u8 hlid; - if (vif) - wlvif = wl12xx_vif_to_data(vif); + if (!vif) { + wl1271_debug(DEBUG_TX, "DROP skb with no vif"); + ieee80211_free_txskb(hw, skb); + return; + } + wlvif = wl12xx_vif_to_data(vif); mapping = skb_get_queue_mapping(skb); q = wl1271_tx_get_queue(mapping); @@ -1185,9 +1197,9 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, * allow these packets through. */ if (hlid == WL12XX_INVALID_LINK_ID || - (wlvif && !test_bit(hlid, wlvif->links_map)) || - (wlcore_is_queue_stopped(wl, q) && - !wlcore_is_queue_stopped_by_reason(wl, q, + (!test_bit(hlid, wlvif->links_map)) || + (wlcore_is_queue_stopped_locked(wl, wlvif, q) && + !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, WLCORE_QUEUE_STOP_REASON_WATERMARK))) { wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); ieee80211_free_txskb(hw, skb); @@ -1199,16 +1211,17 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); wl->tx_queue_count[q]++; + wlvif->tx_queue_count[q]++; /* * The workqueue is slow to process the tx_queue and we need stop * the queue here, otherwise the queue will get too long. 
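
The per-link accounting that moved into wlcore_fw_status() above subtracts the previous snapshot from the firmware's freed-packets counter and masks the result with 0xff, because that counter is only eight bits wide and may wrap between reads. The standalone demo below (plain userspace C, not driver code) shows that the masked delta stays correct across a wrap.

#include <stdio.h>

int main(void)
{
	unsigned char prev = 250;	/* snapshot from the previous read */
	unsigned char curr = 4;		/* firmware counter wrapped past 255 */
	unsigned int freed = (curr - prev) & 0xff;

	/* 250 -> 255 is 5 packets, then 0 -> 4 is 5 more: 10 in total */
	printf("freed since last read: %u\n", freed);	/* prints 10 */
	return 0;
}
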
*/ - if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK && - !wlcore_is_queue_stopped_by_reason(wl, q, + if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK && + !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, WLCORE_QUEUE_STOP_REASON_WATERMARK)) { wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); - wlcore_stop_queue_locked(wl, q, + wlcore_stop_queue_locked(wl, wlvif, q, WLCORE_QUEUE_STOP_REASON_WATERMARK); } @@ -1843,11 +1856,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl) cancel_work_sync(&wl->tx_work); cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); - cancel_delayed_work_sync(&wl->connection_loss_work); /* let's notify MAC80211 about the remaining pending TX frames */ - wl12xx_tx_reset(wl); mutex_lock(&wl->mutex); + wl12xx_tx_reset(wl); wl1271_power_off(wl); /* @@ -1870,14 +1882,17 @@ static void wlcore_op_stop_locked(struct wl1271 *wl) wl->time_offset = 0; wl->ap_fw_ps_map = 0; wl->ap_ps_map = 0; - wl->sched_scanning = false; wl->sleep_auth = WL1271_PSM_ILLEGAL; memset(wl->roles_map, 0, sizeof(wl->roles_map)); memset(wl->links_map, 0, sizeof(wl->links_map)); memset(wl->roc_map, 0, sizeof(wl->roc_map)); + memset(wl->session_ids, 0, sizeof(wl->session_ids)); wl->active_sta_count = 0; + wl->active_link_count = 0; /* The system link is always allocated */ + wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0; + wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0; __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); /* @@ -1903,6 +1918,12 @@ static void wlcore_op_stop_locked(struct wl1271 *wl) wl->tx_res_if = NULL; kfree(wl->target_mem_map); wl->target_mem_map = NULL; + + /* + * FW channels must be re-calibrated after recovery, + * clear the last Reg-Domain channel configuration. 
+ */ + memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last)); } static void wlcore_op_stop(struct ieee80211_hw *hw) @@ -1918,6 +1939,71 @@ static void wlcore_op_stop(struct ieee80211_hw *hw) mutex_unlock(&wl->mutex); } +static void wlcore_channel_switch_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; + int ret; + + dwork = container_of(work, struct delayed_work, work); + wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work); + wl = wlvif->wl; + + wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* check the channel switch is still ongoing */ + if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) + goto out; + + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_chswitch_done(vif, false); + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_cmd_stop_channel_switch(wl, wlvif); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static void wlcore_connection_loss_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; + + dwork = container_of(work, struct delayed_work, work); + wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work); + wl = wlvif->wl; + + wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* Call mac80211 connection loss */ + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + goto out; + + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_connection_loss(vif); +out: + mutex_unlock(&wl->mutex); +} + static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx) { u8 policy = find_first_zero_bit(wl->rate_policies_map, @@ -2037,15 +2123,15 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) wl12xx_allocate_rate_policy(wl, &wlvif->ap.ucast_rate_idx[i]); - wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES; + wlvif->basic_rate_set = CONF_TX_ENABLED_RATES; /* * TODO: check if basic_rate shouldn't be * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); * instead (the same thing for STA above). 
*/ - wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES; + wlvif->basic_rate = CONF_TX_ENABLED_RATES; /* TODO: this seems to be used only for STA, check it */ - wlvif->rate_set = CONF_TX_AP_ENABLED_RATES; + wlvif->rate_set = CONF_TX_ENABLED_RATES; } wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; @@ -2065,6 +2151,10 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) wl1271_rx_streaming_enable_work); INIT_WORK(&wlvif->rx_streaming_disable_work, wl1271_rx_streaming_disable_work); + INIT_DELAYED_WORK(&wlvif->channel_switch_work, + wlcore_channel_switch_work); + INIT_DELAYED_WORK(&wlvif->connection_loss_work, + wlcore_connection_loss_work); INIT_LIST_HEAD(&wlvif->list); setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, @@ -2072,7 +2162,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) return 0; } -static bool wl12xx_init_fw(struct wl1271 *wl) +static int wl12xx_init_fw(struct wl1271 *wl) { int retries = WL1271_BOOT_RETRIES; bool booted = false; @@ -2138,7 +2228,7 @@ power_off: wl->state = WLCORE_STATE_ON; out: - return booted; + return ret; } static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif) @@ -2198,6 +2288,81 @@ static void wl12xx_force_active_psm(struct wl1271 *wl) } } +struct wlcore_hw_queue_iter_data { + unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)]; + /* current vif */ + struct ieee80211_vif *vif; + /* is the current vif among those iterated */ + bool cur_running; +}; + +static void wlcore_hw_queue_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct wlcore_hw_queue_iter_data *iter_data = data; + + if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE)) + return; + + if (iter_data->cur_running || vif == iter_data->vif) { + iter_data->cur_running = true; + return; + } + + __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map); +} + +static int wlcore_allocate_hw_queue_base(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct wlcore_hw_queue_iter_data iter_data = {}; + int i, q_base; + + iter_data.vif = vif; + + /* mark all bits taken by active interfaces */ + ieee80211_iterate_active_interfaces_atomic(wl->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + wlcore_hw_queue_iter, &iter_data); + + /* the current vif is already running in mac80211 (resume/recovery) */ + if (iter_data.cur_running) { + wlvif->hw_queue_base = vif->hw_queue[0]; + wl1271_debug(DEBUG_MAC80211, + "using pre-allocated hw queue base %d", + wlvif->hw_queue_base); + + /* interface type might have changed type */ + goto adjust_cab_queue; + } + + q_base = find_first_zero_bit(iter_data.hw_queue_map, + WLCORE_NUM_MAC_ADDRESSES); + if (q_base >= WLCORE_NUM_MAC_ADDRESSES) + return -EBUSY; + + wlvif->hw_queue_base = q_base * NUM_TX_QUEUES; + wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d", + wlvif->hw_queue_base); + + for (i = 0; i < NUM_TX_QUEUES; i++) { + wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0; + /* register hw queues in mac80211 */ + vif->hw_queue[i] = wlvif->hw_queue_base + i; + } + +adjust_cab_queue: + /* the last places are reserved for cab queues per interface */ + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES + + wlvif->hw_queue_base / NUM_TX_QUEUES; + else + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + + return 0; +} + static int wl1271_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -2206,7 +2371,6 
@@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, struct vif_counter_data vif_count; int ret = 0; u8 role_type; - bool booted = false; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI; @@ -2244,6 +2408,10 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, goto out; } + ret = wlcore_allocate_hw_queue_base(wl, wlvif); + if (ret < 0) + goto out; + if (wl12xx_need_fw_change(wl, vif_count, true)) { wl12xx_force_active_psm(wl); set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); @@ -2263,11 +2431,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, */ memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN); - booted = wl12xx_init_fw(wl); - if (!booted) { - ret = -EINVAL; + ret = wl12xx_init_fw(wl); + if (ret < 0) goto out; - } } ret = wl12xx_cmd_role_enable(wl, vif->addr, @@ -2314,7 +2480,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, wl1271_info("down"); if (wl->scan.state != WL1271_SCAN_STATE_IDLE && - wl->scan_vif == vif) { + wl->scan_wlvif == wlvif) { /* * Rearm the tx watchdog just before idling scan. This * prevents just-finished scans from triggering the watchdog @@ -2323,11 +2489,21 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); - wl->scan_vif = NULL; + wl->scan_wlvif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); } + if (wl->sched_vif == wlvif) { + ieee80211_sched_scan_stopped(wl->hw); + wl->sched_vif = NULL; + } + + if (wl->roc_vif == vif) { + wl->roc_vif = NULL; + ieee80211_remain_on_channel_expired(wl->hw); + } + if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { /* disable active roles */ ret = wl1271_ps_elp_wakeup(wl); @@ -2396,9 +2572,6 @@ deinit: /* Configure for power according to debugfs */ if (sta_auth != WL1271_PSM_ILLEGAL) wl1271_acx_sleep_auth(wl, sta_auth); - /* Configure for power always on */ - else if (wl->quirks & WLCORE_QUIRK_NO_ELP) - wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); /* Configure for ELP power saving */ else wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); @@ -2410,6 +2583,7 @@ unlock: del_timer_sync(&wlvif->rx_streaming_timer); cancel_work_sync(&wlvif->rx_streaming_enable_work); cancel_work_sync(&wlvif->rx_streaming_disable_work); + cancel_delayed_work_sync(&wlvif->connection_loss_work); mutex_lock(&wl->mutex); } @@ -2468,8 +2642,7 @@ static int wl12xx_op_change_interface(struct ieee80211_hw *hw, return ret; } -static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool set_assoc) +static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); @@ -2489,18 +2662,111 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, /* clear encryption type */ wlvif->encryption_type = KEY_NONE; - if (set_assoc) - set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); - if (is_ibss) ret = wl12xx_cmd_role_start_ibss(wl, wlvif); - else + else { + if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) { + /* + * TODO: this is an ugly workaround for wl12xx fw + * bug - we are not able to tx/rx after the first + * start_sta, so make dummy start+stop calls, + * and then call start_sta again. + * this should be fixed in the fw. 
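
For reference, wlcore_allocate_hw_queue_base() in the hunk above gives every interface a contiguous block of NUM_TX_QUEUES data queues and appends the AP cab queues after all of those blocks. The small demo below prints the resulting layout; NUM_TX_QUEUES == 4 and WLCORE_NUM_MAC_ADDRESSES == 3 are assumptions used for illustration, not values quoted from the driver headers.

#include <stdio.h>

#define NUM_TX_QUEUES			4	/* assumption: one per AC */
#define WLCORE_NUM_MAC_ADDRESSES	3	/* assumption */

int main(void)
{
	int vif;

	for (vif = 0; vif < WLCORE_NUM_MAC_ADDRESSES; vif++) {
		int base = vif * NUM_TX_QUEUES;
		int cab = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES + vif;

		printf("vif %d: data queues %d-%d, cab queue %d\n",
		       vif, base, base + NUM_TX_QUEUES - 1, cab);
	}
	return 0;
}
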
+ */ + wl12xx_cmd_role_start_sta(wl, wlvif); + wl12xx_cmd_role_stop_sta(wl, wlvif); + } + ret = wl12xx_cmd_role_start_sta(wl, wlvif); + } + + return ret; +} + +static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb, + int offset) +{ + u8 ssid_len; + const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, + skb->len - offset); + + if (!ptr) { + wl1271_error("No SSID in IEs!"); + return -ENOENT; + } + + ssid_len = ptr[1]; + if (ssid_len > IEEE80211_MAX_SSID_LEN) { + wl1271_error("SSID is too long!"); + return -EINVAL; + } + + wlvif->ssid_len = ssid_len; + memcpy(wlvif->ssid, ptr+2, ssid_len); + return 0; +} + +static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct sk_buff *skb; + int ieoffset; + + /* we currently only support setting the ssid from the ap probe req */ + if (wlvif->bss_type != BSS_TYPE_STA_BSS) + return -EINVAL; + + skb = ieee80211_ap_probereq_get(wl->hw, vif); + if (!skb) + return -EINVAL; + + ieoffset = offsetof(struct ieee80211_mgmt, + u.probe_req.variable); + wl1271_ssid_set(wlvif, skb, ieoffset); + dev_kfree_skb(skb); + + return 0; +} + +static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_bss_conf *bss_conf, + u32 sta_rate_set) +{ + int ieoffset; + int ret; + + wlvif->aid = bss_conf->aid; + wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef); + wlvif->beacon_int = bss_conf->beacon_int; + wlvif->wmm_enabled = bss_conf->qos; + + set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); + + /* + * with wl1271, we don't need to update the + * beacon_int and dtim_period, because the firmware + * updates it by itself when the first beacon is + * received after a join. + */ + ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid); if (ret < 0) - goto out; + return ret; - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - goto out; + /* + * Get a template for hardware connection maintenance + */ + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl, + wlvif, + NULL); + ieoffset = offsetof(struct ieee80211_mgmt, + u.probe_req.variable); + wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset); + + /* enable the connection monitoring feature */ + ret = wl1271_acx_conn_monit_params(wl, wlvif, true); + if (ret < 0) + return ret; /* * The join command disable the keep-alive mode, shut down its process, @@ -2510,35 +2776,83 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, */ ret = wl1271_acx_keep_alive_mode(wl, wlvif, true); if (ret < 0) - goto out; + return ret; ret = wl1271_acx_aid(wl, wlvif, wlvif->aid); if (ret < 0) - goto out; + return ret; ret = wl12xx_cmd_build_klv_null_data(wl, wlvif); if (ret < 0) - goto out; + return ret; ret = wl1271_acx_keep_alive_config(wl, wlvif, wlvif->sta.klv_template_id, ACX_KEEP_ALIVE_TPL_VALID); if (ret < 0) - goto out; + return ret; + + /* + * The default fw psm configuration is AUTO, while mac80211 default + * setting is off (ACTIVE), so sync the fw with the correct value. 
+ */ + ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE); + if (ret < 0) + return ret; + + if (sta_rate_set) { + wlvif->rate_set = + wl1271_tx_enabled_rates_get(wl, + sta_rate_set, + wlvif->band); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + return ret; + } -out: return ret; } -static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif) +static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; + bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS; + + /* make sure we are connected (sta) joined */ + if (sta && + !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + return false; + + /* make sure we are joined (ibss) */ + if (!sta && + test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) + return false; + + if (sta) { + /* use defaults when not associated */ + wlvif->aid = 0; + + /* free probe-request template */ + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = NULL; + + /* disable connection monitor features */ + ret = wl1271_acx_conn_monit_params(wl, wlvif, false); + if (ret < 0) + return ret; + + /* Disable the keep-alive feature */ + ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); + if (ret < 0) + return ret; + } if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - wl12xx_cmd_stop_channel_switch(wl); + wl12xx_cmd_stop_channel_switch(wl, wlvif); ieee80211_chswitch_done(vif, false); + cancel_delayed_work(&wlvif->channel_switch_work); } /* invalidate keep-alive template */ @@ -2546,17 +2860,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif) wlvif->sta.klv_template_id, ACX_KEEP_ALIVE_TPL_INVALID); - /* to stop listening to a channel, we disconnect */ - ret = wl12xx_cmd_role_stop_sta(wl, wlvif); - if (ret < 0) - goto out; - /* reset TX security counters on a clean disconnect */ wlvif->tx_security_last_seq_lsb = 0; wlvif->tx_security_seq = 0; -out: - return ret; + return 0; } static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) @@ -2565,147 +2873,10 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) wlvif->rate_set = wlvif->basic_rate_set; } -static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, - bool idle) -{ - int ret; - bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); - - if (idle == cur_idle) - return 0; - - if (idle) { - /* no need to croc if we weren't busy (e.g. 
during boot) */ - if (wl12xx_dev_role_started(wlvif)) { - ret = wl12xx_stop_dev(wl, wlvif); - if (ret < 0) - goto out; - } - wlvif->rate_set = - wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); - if (ret < 0) - goto out; - clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); - } else { - /* The current firmware only supports sched_scan in idle */ - if (wl->sched_scanning) { - wl1271_scan_sched_scan_stop(wl, wlvif); - ieee80211_sched_scan_stopped(wl->hw); - } - - ret = wl12xx_start_dev(wl, wlvif); - if (ret < 0) - goto out; - set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); - } - -out: - return ret; -} - static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct ieee80211_conf *conf, u32 changed) { - bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); - int channel, ret; - - channel = ieee80211_frequency_to_channel(conf->channel->center_freq); - - /* if the channel changes while joined, join again */ - if (changed & IEEE80211_CONF_CHANGE_CHANNEL && - ((wlvif->band != conf->channel->band) || - (wlvif->channel != channel) || - (wlvif->channel_type != conf->channel_type))) { - /* send all pending packets */ - ret = wlcore_tx_work_locked(wl); - if (ret < 0) - return ret; - - wlvif->band = conf->channel->band; - wlvif->channel = channel; - wlvif->channel_type = conf->channel_type; - - if (is_ap) { - wl1271_set_band_rate(wl, wlvif); - ret = wl1271_init_ap_rates(wl, wlvif); - if (ret < 0) - wl1271_error("AP rate policy change failed %d", - ret); - } else { - /* - * FIXME: the mac80211 should really provide a fixed - * rate to use here. for now, just use the smallest - * possible rate for the band as a fixed rate for - * association frames and other control messages. - */ - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - wl1271_set_band_rate(wl, wlvif); - - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); - if (ret < 0) - wl1271_warning("rate policy for channel " - "failed %d", ret); - - /* - * change the ROC channel. do it only if we are - * not idle. otherwise, CROC will be called - * anyway. 
- */ - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, - &wlvif->flags) && - wl12xx_dev_role_started(wlvif) && - !(conf->flags & IEEE80211_CONF_IDLE)) { - ret = wl12xx_stop_dev(wl, wlvif); - if (ret < 0) - return ret; - - ret = wl12xx_start_dev(wl, wlvif); - if (ret < 0) - return ret; - } - } - } - - if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) { - - if ((conf->flags & IEEE80211_CONF_PS) && - test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && - !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { - - int ps_mode; - char *ps_mode_str; - - if (wl->conf.conn.forced_ps) { - ps_mode = STATION_POWER_SAVE_MODE; - ps_mode_str = "forced"; - } else { - ps_mode = STATION_AUTO_PS_MODE; - ps_mode_str = "auto"; - } - - wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str); - - ret = wl1271_ps_set_mode(wl, wlvif, ps_mode); - - if (ret < 0) - wl1271_warning("enter %s ps failed %d", - ps_mode_str, ret); - - } else if (!(conf->flags & IEEE80211_CONF_PS) && - test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { - - wl1271_debug(DEBUG_PSM, "auto ps disabled"); - - ret = wl1271_ps_set_mode(wl, wlvif, - STATION_ACTIVE_MODE); - if (ret < 0) - wl1271_warning("exit auto ps failed %d", ret); - } - } + int ret; if (conf->power_level != wlvif->power_level) { ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); @@ -2723,37 +2894,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; struct ieee80211_conf *conf = &hw->conf; - int channel, ret = 0; - - channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + int ret = 0; - wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" + wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s" " changed 0x%x", - channel, conf->flags & IEEE80211_CONF_PS ? "on" : "off", conf->power_level, conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", changed); - /* - * mac80211 will go to idle nearly immediately after transmitting some - * frames, such as the deauth. To make sure those frames reach the air, - * wait here until the TX queue is fully flushed. 
- */ - if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || - ((changed & IEEE80211_CONF_CHANGE_IDLE) && - (conf->flags & IEEE80211_CONF_IDLE))) - wl1271_tx_flush(wl); - mutex_lock(&wl->mutex); - /* we support configuring the channel and band even while off */ - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - wl->band = conf->channel->band; - wl->channel = channel; - wl->channel_type = conf->channel_type; - } - if (changed & IEEE80211_CONF_CHANGE_POWER) wl->power_level = conf->power_level; @@ -3073,10 +3224,7 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, * stop the queues and flush to ensure the next packets are * in sync with FW spare block accounting */ - mutex_lock(&wl->mutex); wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK); - mutex_unlock(&wl->mutex); - wl1271_tx_flush(wl); } @@ -3202,6 +3350,29 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, } EXPORT_SYMBOL_GPL(wlcore_set_key); +void wlcore_regdomain_config(struct wl1271 *wl) +{ + int ret; + + if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF)) + return; + + mutex_lock(&wl->mutex); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wlcore_cmd_regdomain_config_locked(wl); + if (ret < 0) { + wl12xx_queue_recovery_work(wl); + goto out; + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + static int wl1271_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) @@ -3241,7 +3412,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, goto out_sleep; } - ret = wl1271_scan(hw->priv, vif, ssid, len, req); + ret = wlcore_scan(hw->priv, vif, ssid, len, req); out_sleep: wl1271_ps_elp_sleep(wl); out: @@ -3254,6 +3425,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan"); @@ -3271,7 +3443,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, goto out; if (wl->scan.state != WL1271_SCAN_STATE_DONE) { - ret = wl1271_scan_stop(wl); + ret = wl->ops->scan_stop(wl, wlvif); if (ret < 0) goto out_sleep; } @@ -3284,7 +3456,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); - wl->scan_vif = NULL; + wl->scan_wlvif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); @@ -3318,15 +3490,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, if (ret < 0) goto out; - ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies); + ret = wl->ops->sched_scan_start(wl, wlvif, req, ies); if (ret < 0) goto out_sleep; - ret = wl1271_scan_sched_scan_start(wl, wlvif); - if (ret < 0) - goto out_sleep; - - wl->sched_scanning = true; + wl->sched_vif = wlvif; out_sleep: wl1271_ps_elp_sleep(wl); @@ -3353,7 +3521,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, if (ret < 0) goto out; - wl1271_scan_sched_scan_stop(wl, wlvif); + wl->ops->sched_scan_stop(wl, wlvif); wl1271_ps_elp_sleep(wl); out: @@ -3418,30 +3586,6 @@ out: return ret; } -static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb, - int offset) -{ - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - u8 ssid_len; - const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, - skb->len - offset); - - if (!ptr) { - wl1271_error("No SSID in IEs!"); - return -ENOENT; - } - - ssid_len = ptr[1]; - if 
(ssid_len > IEEE80211_MAX_SSID_LEN) { - wl1271_error("SSID is too long!"); - return -EINVAL; - } - - wlvif->ssid_len = ssid_len; - memcpy(wlvif->ssid, ptr+2, ssid_len); - return 0; -} - static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset) { int len; @@ -3622,7 +3766,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl, wl1271_debug(DEBUG_MASTER, "beacon updated"); - ret = wl1271_ssid_set(vif, beacon, ieoffset); + ret = wl1271_ssid_set(wlvif, beacon, ieoffset); if (ret < 0) { dev_kfree_skb(beacon); goto out; @@ -3639,6 +3783,12 @@ static int wlcore_set_beacon_template(struct wl1271 *wl, goto out; } + wlvif->wmm_enabled = + cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WMM, + beacon->data + ieoffset, + beacon->len - ieoffset); + /* * In case we already have a probe-resp beacon set explicitly * by usermode, don't use the beacon data. @@ -3692,7 +3842,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); int ret = 0; - if ((changed & BSS_CHANGED_BEACON_INT)) { + if (changed & BSS_CHANGED_BEACON_INT) { wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", bss_conf->beacon_int); @@ -3705,7 +3855,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, wl1271_ap_set_probe_resp_tmpl(wl, rate, vif); } - if ((changed & BSS_CHANGED_BEACON)) { + if (changed & BSS_CHANGED_BEACON) { ret = wlcore_set_beacon_template(wl, vif, is_ap); if (ret < 0) goto out; @@ -3726,7 +3876,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret = 0; - if ((changed & BSS_CHANGED_BASIC_RATES)) { + if (changed & BSS_CHANGED_BASIC_RATES) { u32 rates = bss_conf->basic_rates; wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, @@ -3757,7 +3907,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl, if (ret < 0) goto out; - if ((changed & BSS_CHANGED_BEACON_ENABLED)) { + if (changed & BSS_CHANGED_BEACON_ENABLED) { if (bss_conf->enable_beacon) { if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { ret = wl12xx_cmd_role_start_ap(wl, wlvif); @@ -3804,6 +3954,79 @@ out: return; } +static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_bss_conf *bss_conf, + u32 sta_rate_set) +{ + u32 rates; + int ret; + + wl1271_debug(DEBUG_MAC80211, + "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x", + bss_conf->bssid, bss_conf->aid, + bss_conf->beacon_int, + bss_conf->basic_rates, sta_rate_set); + + wlvif->beacon_int = bss_conf->beacon_int; + rates = bss_conf->basic_rates; + wlvif->basic_rate_set = + wl1271_tx_enabled_rates_get(wl, rates, + wlvif->band); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); + + if (sta_rate_set) + wlvif->rate_set = + wl1271_tx_enabled_rates_get(wl, + sta_rate_set, + wlvif->band); + + /* we only support sched_scan while not connected */ + if (wl->sched_vif == wlvif) + wl->ops->sched_scan_stop(wl, wlvif); + + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl12xx_cmd_build_null_data(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif)); + if (ret < 0) + return ret; + + wlcore_set_ssid(wl, wlvif); + + set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + + return 0; +} + +static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + /* revert back to minimum rates for the current band */ + 
wl1271_set_band_rate(wl, wlvif); + wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + return ret; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) { + ret = wl12xx_cmd_role_stop_sta(wl, wlvif); + if (ret < 0) + return ret; + } + + clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + return 0; +} /* STA/IBSS mode changes */ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, struct ieee80211_vif *vif, @@ -3811,7 +4034,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, u32 changed) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - bool do_join = false, set_assoc = false; + bool do_join = false; bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); bool ibss_joined = false; u32 sta_rate_set = 0; @@ -3832,9 +4055,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags); ibss_joined = true; } else { - if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, - &wlvif->flags)) - wl1271_unjoin(wl, wlvif); + wlcore_unset_assoc(wl, wlvif); + wl12xx_cmd_role_stop_sta(wl, wlvif); } } @@ -3852,13 +4074,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, do_join = true; } - if (changed & BSS_CHANGED_IDLE && !is_ibss) { - ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle); - if (ret < 0) - wl1271_warning("idle mode change failed %d", ret); - } - - if ((changed & BSS_CHANGED_CQM)) { + if (changed & BSS_CHANGED_CQM) { bool enable = false; if (bss_conf->cqm_rssi_thold) enable = true; @@ -3870,150 +4086,39 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, wlvif->rssi_thold = bss_conf->cqm_rssi_thold; } - if (changed & BSS_CHANGED_BSSID) - if (!is_zero_ether_addr(bss_conf->bssid)) { - ret = wl12xx_cmd_build_null_data(wl, wlvif); - if (ret < 0) - goto out; - - ret = wl1271_build_qos_null_data(wl, vif); - if (ret < 0) - goto out; - } - - if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { + if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT | + BSS_CHANGED_ASSOC)) { rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); - if (!sta) - goto sta_not_found; - - /* save the supp_rates of the ap */ - sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band]; - if (sta->ht_cap.ht_supported) - sta_rate_set |= - (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) | - (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET); - sta_ht_cap = sta->ht_cap; - sta_exists = true; - -sta_not_found: + if (sta) { + u8 *rx_mask = sta->ht_cap.mcs.rx_mask; + + /* save the supp_rates of the ap */ + sta_rate_set = sta->supp_rates[wlvif->band]; + if (sta->ht_cap.ht_supported) + sta_rate_set |= + (rx_mask[0] << HW_HT_RATES_OFFSET) | + (rx_mask[1] << HW_MIMO_RATES_OFFSET); + sta_ht_cap = sta->ht_cap; + sta_exists = true; + } + rcu_read_unlock(); } - if ((changed & BSS_CHANGED_ASSOC)) { - if (bss_conf->assoc) { - u32 rates; - int ieoffset; - wlvif->aid = bss_conf->aid; - wlvif->channel_type = - cfg80211_get_chandef_type(&bss_conf->chandef); - wlvif->beacon_int = bss_conf->beacon_int; - do_join = true; - set_assoc = true; - - /* - * use basic rates from AP, and determine lowest rate - * to use with control frames. 
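
The consolidated station lookup above folds the peer's legacy rates and its HT MCS rx_mask into a single sta_rate_set word before handing it to wlcore_set_bssid()/wlcore_set_assoc(). The demo below shows that packing; the HW_HT_RATES_OFFSET/HW_MIMO_RATES_OFFSET values are assumptions chosen for illustration, not quoted from the driver headers.

#include <stdio.h>

#define HW_HT_RATES_OFFSET	16	/* assumption */
#define HW_MIMO_RATES_OFFSET	24	/* assumption */

int main(void)
{
	unsigned int supp_rates = 0x0fff;	   /* example legacy rate bits */
	unsigned char rx_mask[2] = { 0xff, 0x00 }; /* MCS0-7, single stream */
	unsigned int sta_rate_set = supp_rates;

	sta_rate_set |= (rx_mask[0] << HW_HT_RATES_OFFSET) |
			(rx_mask[1] << HW_MIMO_RATES_OFFSET);

	printf("sta_rate_set = 0x%08x\n", sta_rate_set);   /* 0x00ff0fff */
	return 0;
}
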
- */ - rates = bss_conf->basic_rates; - wlvif->basic_rate_set = - wl1271_tx_enabled_rates_get(wl, rates, - wlvif->band); - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); - if (sta_rate_set) - wlvif->rate_set = - wl1271_tx_enabled_rates_get(wl, - sta_rate_set, - wlvif->band); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); - if (ret < 0) - goto out; - - /* - * with wl1271, we don't need to update the - * beacon_int and dtim_period, because the firmware - * updates it by itself when the first beacon is - * received after a join. - */ - ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid); + if (changed & BSS_CHANGED_BSSID) { + if (!is_zero_ether_addr(bss_conf->bssid)) { + ret = wlcore_set_bssid(wl, wlvif, bss_conf, + sta_rate_set); if (ret < 0) goto out; - /* - * Get a template for hardware connection maintenance - */ - dev_kfree_skb(wlvif->probereq); - wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl, - wlvif, - NULL); - ieoffset = offsetof(struct ieee80211_mgmt, - u.probe_req.variable); - wl1271_ssid_set(vif, wlvif->probereq, ieoffset); - - /* enable the connection monitoring feature */ - ret = wl1271_acx_conn_monit_params(wl, wlvif, true); - if (ret < 0) - goto out; + /* Need to update the BSSID (for filtering etc) */ + do_join = true; } else { - /* use defaults when not associated */ - bool was_assoc = - !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, - &wlvif->flags); - bool was_ifup = - !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT, - &wlvif->flags); - wlvif->aid = 0; - - /* free probe-request template */ - dev_kfree_skb(wlvif->probereq); - wlvif->probereq = NULL; - - /* revert back to minimum rates for the current band */ - wl1271_set_band_rate(wl, wlvif); - wlvif->basic_rate = - wl1271_tx_min_rate_get(wl, - wlvif->basic_rate_set); - ret = wl1271_acx_sta_rate_policies(wl, wlvif); - if (ret < 0) - goto out; - - /* disable connection monitor features */ - ret = wl1271_acx_conn_monit_params(wl, wlvif, false); - - /* Disable the keep-alive feature */ - ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); + ret = wlcore_clear_bssid(wl, wlvif); if (ret < 0) goto out; - - /* restore the bssid filter and go to dummy bssid */ - if (was_assoc) { - /* - * we might have to disable roc, if there was - * no IF_OPER_UP notification. - */ - if (!was_ifup) { - ret = wl12xx_croc(wl, wlvif->role_id); - if (ret < 0) - goto out; - } - /* - * (we also need to disable roc in case of - * roaming on the same channel. until we will - * have a better flow...) - */ - if (test_bit(wlvif->dev_role_id, wl->roc_map)) { - ret = wl12xx_croc(wl, - wlvif->dev_role_id); - if (ret < 0) - goto out; - } - - wl1271_unjoin(wl, wlvif); - if (!bss_conf->idle) - wl12xx_start_dev(wl, wlvif); - } } } @@ -4043,71 +4148,87 @@ sta_not_found: goto out; if (do_join) { - ret = wl1271_join(wl, wlvif, set_assoc); + ret = wlcore_join(wl, wlvif); if (ret < 0) { wl1271_warning("cmd join failed %d", ret); goto out; } + } - /* ROC until connected (after EAPOL exchange) */ - if (!is_ibss) { - ret = wl12xx_roc(wl, wlvif, wlvif->role_id); + if (changed & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + ret = wlcore_set_assoc(wl, wlvif, bss_conf, + sta_rate_set); if (ret < 0) goto out; if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) wl12xx_set_authorized(wl, wlvif); + } else { + wlcore_unset_assoc(wl, wlvif); } - /* - * stop device role if started (we might already be in - * STA/IBSS role). 
- */ - if (wl12xx_dev_role_started(wlvif)) { - ret = wl12xx_stop_dev(wl, wlvif); + } + + if (changed & BSS_CHANGED_PS) { + if ((bss_conf->ps) && + test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && + !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { + int ps_mode; + char *ps_mode_str; + + if (wl->conf.conn.forced_ps) { + ps_mode = STATION_POWER_SAVE_MODE; + ps_mode_str = "forced"; + } else { + ps_mode = STATION_AUTO_PS_MODE; + ps_mode_str = "auto"; + } + + wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str); + + ret = wl1271_ps_set_mode(wl, wlvif, ps_mode); if (ret < 0) - goto out; + wl1271_warning("enter %s ps failed %d", + ps_mode_str, ret); + } else if (!bss_conf->ps && + test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { + wl1271_debug(DEBUG_PSM, "auto ps disabled"); + + ret = wl1271_ps_set_mode(wl, wlvif, + STATION_ACTIVE_MODE); + if (ret < 0) + wl1271_warning("exit auto ps failed %d", ret); } } /* Handle new association with HT. Do this after join. */ - if (sta_exists) { - if ((changed & BSS_CHANGED_HT) && - (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) { - ret = wl1271_acx_set_ht_capabilities(wl, - &sta_ht_cap, - true, - wlvif->sta.hlid); - if (ret < 0) { - wl1271_warning("Set ht cap true failed %d", - ret); - goto out; - } + if (sta_exists && + (changed & BSS_CHANGED_HT)) { + bool enabled = + bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT; + + ret = wlcore_hw_set_peer_cap(wl, + &sta_ht_cap, + enabled, + wlvif->rate_set, + wlvif->sta.hlid); + if (ret < 0) { + wl1271_warning("Set ht cap failed %d", ret); + goto out; + } - /* handle new association without HT and disassociation */ - else if (changed & BSS_CHANGED_ASSOC) { - ret = wl1271_acx_set_ht_capabilities(wl, - &sta_ht_cap, - false, - wlvif->sta.hlid); + + if (enabled) { + ret = wl1271_acx_set_ht_information(wl, wlvif, + bss_conf->ht_operation_mode); if (ret < 0) { - wl1271_warning("Set ht cap false failed %d", + wl1271_warning("Set ht information failed %d", ret); goto out; } } } - /* Handle HT information change. Done after join. */ - if ((changed & BSS_CHANGED_HT) && - (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) { - ret = wl1271_acx_set_ht_information(wl, wlvif, - bss_conf->ht_operation_mode); - if (ret < 0) { - wl1271_warning("Set ht information failed %d", ret); - goto out; - } - } - /* Handle arp filtering. Done after join. 
*/ if ((changed & BSS_CHANGED_ARP_FILTER) || (!is_ibss && (changed & BSS_CHANGED_QOS))) { @@ -4115,8 +4236,7 @@ sta_not_found: wlvif->sta.qos = bss_conf->qos; WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS); - if (bss_conf->arp_addr_cnt == 1 && - bss_conf->arp_filter_enabled) { + if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) { wlvif->ip_addr = addr; /* * The template should have been configured only upon @@ -4157,15 +4277,15 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); int ret; - wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", - (int)changed); + wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x", + wlvif->role_id, (int)changed); /* * make sure to cancel pending disconnections if our association * state changed */ if (!is_ap && (changed & BSS_CHANGED_ASSOC)) - cancel_delayed_work_sync(&wl->connection_loss_work); + cancel_delayed_work_sync(&wlvif->connection_loss_work); if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) @@ -4194,6 +4314,76 @@ out: mutex_unlock(&wl->mutex); } +static int wlcore_op_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)", + ieee80211_frequency_to_channel(ctx->def.chan->center_freq), + cfg80211_get_chandef_type(&ctx->def)); + return 0; +} + +static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)", + ieee80211_frequency_to_channel(ctx->def.chan->center_freq), + cfg80211_get_chandef_type(&ctx->def)); +} + +static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + wl1271_debug(DEBUG_MAC80211, + "mac80211 change chanctx %d (type %d) changed 0x%x", + ieee80211_frequency_to_channel(ctx->def.chan->center_freq), + cfg80211_get_chandef_type(&ctx->def), changed); +} + +static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int channel = ieee80211_frequency_to_channel( + ctx->def.chan->center_freq); + + wl1271_debug(DEBUG_MAC80211, + "mac80211 assign chanctx (role %d) %d (type %d)", + wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def)); + + mutex_lock(&wl->mutex); + + wlvif->band = ctx->def.chan->band; + wlvif->channel = channel; + wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def); + + /* update default rates according to the band */ + wl1271_set_band_rate(wl, wlvif); + + mutex_unlock(&wl->mutex); + + return 0; +} + +static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + + wl1271_debug(DEBUG_MAC80211, + "mac80211 unassign chanctx (role %d) %d (type %d)", + wlvif->role_id, + ieee80211_frequency_to_channel(ctx->def.chan->center_freq), + cfg80211_get_chandef_type(&ctx->def)); + + wl1271_tx_flush(wl); +} + static int wl1271_op_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *params) @@ -4321,8 +4511,6 @@ void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) return; clear_bit(hlid, wlvif->ap.sta_hlid_map); - memset(wl->links[hlid].addr, 0, 
ETH_ALEN); - wl->links[hlid].ba_bitmap = 0; __clear_bit(hlid, &wl->ap_ps_map); __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); wl12xx_free_link(wl, wlvif, &hlid); @@ -4382,6 +4570,45 @@ static int wl12xx_sta_remove(struct wl1271 *wl, return ret; } +static void wlcore_roc_if_possible(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + if (find_first_bit(wl->roc_map, + WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) + return; + + if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) + return; + + wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel); +} + +static void wlcore_update_inconn_sta(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct wl1271_station *wl_sta, + bool in_connection) +{ + if (in_connection) { + if (WARN_ON(wl_sta->in_connection)) + return; + wl_sta->in_connection = true; + if (!wlvif->inconn_count++) + wlcore_roc_if_possible(wl, wlvif); + } else { + if (!wl_sta->in_connection) + return; + + wl_sta->in_connection = false; + wlvif->inconn_count--; + if (WARN_ON(wlvif->inconn_count < 0)) + return; + + if (!wlvif->inconn_count) + if (test_bit(wlvif->role_id, wl->roc_map)) + wl12xx_croc(wl, wlvif->role_id); + } +} + static int wl12xx_update_sta_state(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct ieee80211_sta *sta, @@ -4400,8 +4627,13 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, /* Add station (AP mode) */ if (is_ap && old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE) - return wl12xx_sta_add(wl, wlvif, sta); + new_state == IEEE80211_STA_NONE) { + ret = wl12xx_sta_add(wl, wlvif, sta); + if (ret) + return ret; + + wlcore_update_inconn_sta(wl, wlvif, wl_sta, true); + } /* Remove station (AP mode) */ if (is_ap && @@ -4409,35 +4641,59 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, new_state == IEEE80211_STA_NOTEXIST) { /* must not fail */ wl12xx_sta_remove(wl, wlvif, sta); - return 0; + + wlcore_update_inconn_sta(wl, wlvif, wl_sta, false); } /* Authorize station (AP mode) */ if (is_ap && new_state == IEEE80211_STA_AUTHORIZED) { - ret = wl12xx_cmd_set_peer_state(wl, hlid); + ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid); if (ret < 0) return ret; ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, hlid); - return ret; + if (ret) + return ret; + + wlcore_update_inconn_sta(wl, wlvif, wl_sta, false); } /* Authorize station */ if (is_sta && new_state == IEEE80211_STA_AUTHORIZED) { set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); - return wl12xx_set_authorized(wl, wlvif); + ret = wl12xx_set_authorized(wl, wlvif); + if (ret) + return ret; } if (is_sta && old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); - return 0; + clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags); } + /* clear ROCs on failure or authorization */ + if (is_sta && + (new_state == IEEE80211_STA_AUTHORIZED || + new_state == IEEE80211_STA_NOTEXIST)) { + if (test_bit(wlvif->role_id, wl->roc_map)) + wl12xx_croc(wl, wlvif->role_id); + } + + if (is_sta && + old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + if (find_first_bit(wl->roc_map, + WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) { + WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID); + wl12xx_roc(wl, wlvif, wlvif->role_id, + wlvif->band, wlvif->channel); + } + } return 0; } @@ -4502,18 +4758,18 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, if (wlvif->bss_type == BSS_TYPE_STA_BSS) { hlid = wlvif->sta.hlid; - ba_bitmap = &wlvif->sta.ba_rx_bitmap; } else if (wlvif->bss_type == 
BSS_TYPE_AP_BSS) { struct wl1271_station *wl_sta; wl_sta = (struct wl1271_station *)sta->drv_priv; hlid = wl_sta->hlid; - ba_bitmap = &wl->links[hlid].ba_bitmap; } else { ret = -EINVAL; goto out; } + ba_bitmap = &wl->links[hlid].ba_bitmap; + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -4575,7 +4831,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, * Falling break here on purpose for all TX APDU commands. */ case IEEE80211_AMPDU_TX_START: - case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: case IEEE80211_AMPDU_TX_OPERATIONAL: ret = -EINVAL; break; @@ -4665,12 +4923,23 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, /* TODO: change mac80211 to pass vif as param */ wl12xx_for_each_wlvif_sta(wl, wlvif) { - ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch); + unsigned long delay_usec; + + ret = wl->ops->channel_switch(wl, wlvif, ch_switch); + if (ret) + goto out_sleep; - if (!ret) - set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); + set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); + + /* indicate failure 5 seconds after channel switch time */ + delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) * + ch_switch->count; + ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work, + usecs_to_jiffies(delay_usec) + + msecs_to_jiffies(5000)); } +out_sleep: wl1271_ps_elp_sleep(wl); out: @@ -4684,6 +4953,144 @@ static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop) wl1271_tx_flush(wl); } +static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct wl1271 *wl = hw->priv; + int channel, ret = 0; + + channel = ieee80211_frequency_to_channel(chan->center_freq); + + wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)", + channel, wlvif->role_id); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* return EBUSY if we can't ROC right now */ + if (WARN_ON(wl->roc_vif || + find_first_bit(wl->roc_map, + WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) { + ret = -EBUSY; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl12xx_start_dev(wl, wlvif, chan->band, channel); + if (ret < 0) + goto out_sleep; + + wl->roc_vif = vif; + ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, + msecs_to_jiffies(duration)); +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return ret; +} + +static int __wlcore_roc_completed(struct wl1271 *wl) +{ + struct wl12xx_vif *wlvif; + int ret; + + /* already completed */ + if (unlikely(!wl->roc_vif)) + return 0; + + wlvif = wl12xx_vif_to_data(wl->roc_vif); + + if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + return -EBUSY; + + ret = wl12xx_stop_dev(wl, wlvif); + if (ret < 0) + return ret; + + wl->roc_vif = NULL; + + return 0; +} + +static int wlcore_roc_completed(struct wl1271 *wl) +{ + int ret; + + wl1271_debug(DEBUG_MAC80211, "roc complete"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EBUSY; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = __wlcore_roc_completed(wl); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wlcore_roc_complete_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + int ret; + + dwork = 
container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1271, roc_complete_work); + + ret = wlcore_roc_completed(wl); + if (!ret) + ieee80211_remain_on_channel_expired(wl->hw); +} + +static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw) +{ + struct wl1271 *wl = hw->priv; + + wl1271_debug(DEBUG_MAC80211, "mac80211 croc"); + + /* TODO: per-vif */ + wl1271_tx_flush(wl); + + /* + * we can't just flush_work here, because it might deadlock + * (as we might get called from the same workqueue) + */ + cancel_delayed_work_sync(&wl->roc_complete_work); + wlcore_roc_completed(wl); + + return 0; +} + +static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct wl1271 *wl = hw->priv; + + wlcore_hw_sta_rc_update(wl, wlvif, sta, changed); +} + static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) { struct wl1271 *wl = hw->priv; @@ -4747,20 +5154,20 @@ static struct ieee80211_rate wl1271_rates[] = { /* can't be const, mac80211 writes to this */ static struct ieee80211_channel wl1271_channels[] = { - { .hw_value = 1, .center_freq = 2412, .max_power = 25 }, - { .hw_value = 2, .center_freq = 2417, .max_power = 25 }, - { .hw_value = 3, .center_freq = 2422, .max_power = 25 }, - { .hw_value = 4, .center_freq = 2427, .max_power = 25 }, - { .hw_value = 5, .center_freq = 2432, .max_power = 25 }, - { .hw_value = 6, .center_freq = 2437, .max_power = 25 }, - { .hw_value = 7, .center_freq = 2442, .max_power = 25 }, - { .hw_value = 8, .center_freq = 2447, .max_power = 25 }, - { .hw_value = 9, .center_freq = 2452, .max_power = 25 }, - { .hw_value = 10, .center_freq = 2457, .max_power = 25 }, - { .hw_value = 11, .center_freq = 2462, .max_power = 25 }, - { .hw_value = 12, .center_freq = 2467, .max_power = 25 }, - { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, - { .hw_value = 14, .center_freq = 2484, .max_power = 25 }, + { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR }, }; /* can't be const, mac80211 writes to this */ @@ -4801,40 +5208,40 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = { /* 5 GHz band channels for WL1273 */ static struct ieee80211_channel wl1271_channels_5ghz[] = { - { .hw_value = 7, .center_freq = 5035, .max_power = 25 }, - { .hw_value = 8, .center_freq = 5040, .max_power = 25 }, - { .hw_value = 9, .center_freq = 5045, .max_power = 25 }, - { .hw_value = 11, .center_freq = 5055, .max_power = 25 }, - { .hw_value = 12, .center_freq = 5060, 
.max_power = 25 }, - { .hw_value = 16, .center_freq = 5080, .max_power = 25 }, - { .hw_value = 34, .center_freq = 5170, .max_power = 25 }, - { .hw_value = 36, .center_freq = 5180, .max_power = 25 }, - { .hw_value = 38, .center_freq = 5190, .max_power = 25 }, - { .hw_value = 40, .center_freq = 5200, .max_power = 25 }, - { .hw_value = 42, .center_freq = 5210, .max_power = 25 }, - { .hw_value = 44, .center_freq = 5220, .max_power = 25 }, - { .hw_value = 46, .center_freq = 5230, .max_power = 25 }, - { .hw_value = 48, .center_freq = 5240, .max_power = 25 }, - { .hw_value = 52, .center_freq = 5260, .max_power = 25 }, - { .hw_value = 56, .center_freq = 5280, .max_power = 25 }, - { .hw_value = 60, .center_freq = 5300, .max_power = 25 }, - { .hw_value = 64, .center_freq = 5320, .max_power = 25 }, - { .hw_value = 100, .center_freq = 5500, .max_power = 25 }, - { .hw_value = 104, .center_freq = 5520, .max_power = 25 }, - { .hw_value = 108, .center_freq = 5540, .max_power = 25 }, - { .hw_value = 112, .center_freq = 5560, .max_power = 25 }, - { .hw_value = 116, .center_freq = 5580, .max_power = 25 }, - { .hw_value = 120, .center_freq = 5600, .max_power = 25 }, - { .hw_value = 124, .center_freq = 5620, .max_power = 25 }, - { .hw_value = 128, .center_freq = 5640, .max_power = 25 }, - { .hw_value = 132, .center_freq = 5660, .max_power = 25 }, - { .hw_value = 136, .center_freq = 5680, .max_power = 25 }, - { .hw_value = 140, .center_freq = 5700, .max_power = 25 }, - { .hw_value = 149, .center_freq = 5745, .max_power = 25 }, - { .hw_value = 153, .center_freq = 5765, .max_power = 25 }, - { .hw_value = 157, .center_freq = 5785, .max_power = 25 }, - { .hw_value = 161, .center_freq = 5805, .max_power = 25 }, - { .hw_value = 165, .center_freq = 5825, .max_power = 25 }, + { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR }, 
+ { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR }, }; static struct ieee80211_supported_band wl1271_band_5ghz = { @@ -4875,6 +5282,14 @@ static const struct ieee80211_ops wl1271_ops = { .set_bitrate_mask = wl12xx_set_bitrate_mask, .channel_switch = wl12xx_op_channel_switch, .flush = wlcore_op_flush, + .remain_on_channel = wlcore_op_remain_on_channel, + .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel, + .add_chanctx = wlcore_op_add_chanctx, + .remove_chanctx = wlcore_op_remove_chanctx, + .change_chanctx = wlcore_op_change_chanctx, + .assign_vif_chanctx = wlcore_op_assign_vif_chanctx, + .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx, + .sta_rc_update = wlcore_op_sta_rc_update, CFG80211_TESTMODE_CMD(wl1271_tm_cmd) }; @@ -5044,34 +5459,6 @@ static struct bin_attribute fwlog_attr = { .read = wl1271_sysfs_read_fwlog, }; -static void wl1271_connection_loss_work(struct work_struct *work) -{ - struct delayed_work *dwork; - struct wl1271 *wl; - struct ieee80211_vif *vif; - struct wl12xx_vif *wlvif; - - dwork = container_of(work, struct delayed_work, work); - wl = container_of(dwork, struct wl1271, connection_loss_work); - - wl1271_info("Connection loss work."); - - mutex_lock(&wl->mutex); - - if (unlikely(wl->state != WLCORE_STATE_ON)) - goto out; - - /* Call mac80211 connection loss */ - wl12xx_for_each_wlvif_sta(wl, wlvif) { - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - goto out; - vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_connection_loss(vif); - } -out: - mutex_unlock(&wl->mutex); -} - static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic) { int i; @@ -5117,7 +5504,7 @@ static int wl12xx_get_hw_info(struct wl1271 *wl) ret = wl12xx_set_power_on(wl); if (ret < 0) - goto out; + return ret; ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id); if (ret < 0) @@ -5207,10 +5594,9 @@ static const struct ieee80211_iface_limit wlcore_iface_limits[] = { }, }; -static const struct ieee80211_iface_combination +static struct ieee80211_iface_combination wlcore_iface_combinations[] = { { - .num_different_channels = 1, .max_interfaces = 3, .limits = wlcore_iface_limits, .n_limits = ARRAY_SIZE(wlcore_iface_limits), @@ -5219,6 +5605,7 @@ wlcore_iface_combinations[] = { static int wl1271_init_ieee80211(struct wl1271 *wl) { + int i; static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, @@ -5249,7 +5636,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) IEEE80211_HW_AP_LINK_PS | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | - IEEE80211_HW_SCAN_WHILE_IDLE; + IEEE80211_HW_QUEUE_CONTROL; wl->hw->wiphy->cipher_suites = cipher_suites; wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); @@ -5271,6 +5658,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->max_sched_scan_ie_len = 
WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); + wl->hw->wiphy->max_remain_on_channel_duration = 5000; + wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; @@ -5279,6 +5668,22 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) ARRAY_SIZE(wl1271_channels_5ghz) > WL1271_MAX_CHANNELS); /* + * clear channel flags from the previous usage + * and restore max_power & max_antenna_gain values. + */ + for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) { + wl1271_band_2ghz.channels[i].flags = 0; + wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR; + wl1271_band_2ghz.channels[i].max_antenna_gain = 0; + } + + for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) { + wl1271_band_5ghz.channels[i].flags = 0; + wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR; + wl1271_band_5ghz.channels[i].max_antenna_gain = 0; + } + + /* * We keep local copies of the band structs because we need to * modify them on a per-device basis. */ @@ -5298,7 +5703,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl->bands[IEEE80211_BAND_5GHZ]; - wl->hw->queues = 4; + /* + * allow 4 queues per mac address we support + + * 1 cab queue per mac + one global offchannel Tx queue + */ + wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1; + + /* the last queue is the offchannel queue */ + wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1; wl->hw->max_rates = 1; wl->hw->wiphy->reg_notifier = wl1271_reg_notify; @@ -5311,6 +5723,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; /* allowed interface combinations */ + wlcore_iface_combinations[0].num_different_channels = wl->num_channels; wl->hw->wiphy->iface_combinations = wlcore_iface_combinations; wl->hw->wiphy->n_iface_combinations = ARRAY_SIZE(wlcore_iface_combinations); @@ -5327,7 +5740,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) #define WL1271_DEFAULT_CHANNEL 0 -struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size) +struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, + u32 mbox_size) { struct ieee80211_hw *hw; struct wl1271 *wl; @@ -5369,9 +5783,8 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size) INIT_WORK(&wl->tx_work, wl1271_tx_work); INIT_WORK(&wl->recovery_work, wl1271_recovery_work); INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); + INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work); INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work); - INIT_DELAYED_WORK(&wl->connection_loss_work, - wl1271_connection_loss_work); wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); if (!wl->freezable_wq) { @@ -5387,14 +5800,15 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size) wl->flags = 0; wl->sg_enabled = true; wl->sleep_auth = WL1271_PSM_ILLEGAL; + wl->recovery_count = 0; wl->hw_pg_ver = -1; wl->ap_ps_map = 0; wl->ap_fw_ps_map = 0; wl->quirks = 0; wl->platform_quirks = 0; - wl->sched_scanning = false; wl->system_hlid = WL12XX_SYSTEM_HLID; wl->active_sta_count = 0; + wl->active_link_count = 0; wl->fwlog_size = 0; init_waitqueue_head(&wl->fwlog_waitq); @@ -5434,14 +5848,24 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size) goto err_dummy_packet; } - wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA); + wl->mbox_size = mbox_size; + wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA); if (!wl->mbox) { ret = 
-ENOMEM; goto err_fwlog; } + wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL); + if (!wl->buffer_32) { + ret = -ENOMEM; + goto err_mbox; + } + return hw; +err_mbox: + kfree(wl->mbox); + err_fwlog: free_page((unsigned long)wl->fwlog); @@ -5480,6 +5904,8 @@ int wlcore_free_hw(struct wl1271 *wl) device_remove_file(wl->dev, &dev_attr_hw_pg_ver); device_remove_file(wl->dev, &dev_attr_bt_coex_state); + kfree(wl->buffer_32); + kfree(wl->mbox); free_page((unsigned long)wl->fwlog); dev_kfree_skb(wl->dummy_packet); free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size)); @@ -5536,7 +5962,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) { struct wl1271 *wl = context; struct platform_device *pdev = wl->pdev; - struct wl12xx_platform_data *pdata = pdev->dev.platform_data; + struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data; + struct wl12xx_platform_data *pdata = pdev_data->pdata; unsigned long irqflags; int ret; @@ -5565,8 +5992,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) wl->irq = platform_get_irq(pdev, 0); wl->platform_quirks = pdata->platform_quirks; - wl->set_power = pdata->set_power; - wl->if_ops = pdata->ops; + wl->if_ops = pdev_data->if_ops; if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) irqflags = IRQF_TRIGGER_RISING; @@ -5712,10 +6138,10 @@ module_param_named(fwlog, fwlog_param, charp, 0); MODULE_PARM_DESC(fwlog, "FW logger options: continuous, ondemand, dbgpins or disable"); -module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR); +module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); -module_param(no_recovery, bool, S_IRUSR | S_IWUSR); +module_param(no_recovery, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. 
FW will remain stuck."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c index 4d1414a673fb..9b7b6e2e4fbc 100644 --- a/drivers/net/wireless/ti/wlcore/ps.c +++ b/drivers/net/wireless/ti/wlcore/ps.c @@ -151,9 +151,6 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl) wl12xx_queue_recovery_work(wl); ret = -ETIMEDOUT; goto err; - } else if (ret < 0) { - wl1271_error("ELP wakeup completion error."); - goto err; } } @@ -242,11 +239,12 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid) struct ieee80211_tx_info *info; unsigned long flags; int filtered[NUM_TX_QUEUES]; + struct wl1271_link *lnk = &wl->links[hlid]; /* filter all frames currently in the low level queues for this hlid */ for (i = 0; i < NUM_TX_QUEUES; i++) { filtered[i] = 0; - while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { + while ((skb = skb_dequeue(&lnk->tx_queue[i]))) { filtered[i]++; if (WARN_ON(wl12xx_is_dummy_packet(wl, skb))) @@ -260,8 +258,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid) } spin_lock_irqsave(&wl->wl_lock, flags); - for (i = 0; i < NUM_TX_QUEUES; i++) + for (i = 0; i < NUM_TX_QUEUES; i++) { wl->tx_queue_count[i] -= filtered[i]; + if (lnk->wlvif) + lnk->wlvif->tx_queue_count[i] -= filtered[i]; + } spin_unlock_irqrestore(&wl->wl_lock, flags); wl1271_handle_tx_low_watermark(wl); diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index 9ee0ec6fd1db..6791a1a6afba 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -92,11 +92,16 @@ static void wl1271_rx_status(struct wl1271 *wl, status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED | RX_FLAG_DECRYPTED; - if (unlikely(desc_err_code == WL1271_RX_DESC_MIC_FAIL)) { + if (unlikely(desc_err_code & WL1271_RX_DESC_MIC_FAIL)) { status->flag |= RX_FLAG_MMIC_ERROR; - wl1271_warning("Michael MIC error"); + wl1271_warning("Michael MIC error. 
Desc: 0x%x", + desc_err_code); } } + + if (beacon) + wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel, + status->band); } static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, @@ -108,7 +113,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, u8 *buf; u8 beacon = 0; u8 is_data = 0; - u8 reserved = 0; + u8 reserved = 0, offset_to_data = 0; u16 seq_num; u32 pkt_data_len; @@ -128,6 +133,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, if (rx_align == WLCORE_RX_BUF_UNALIGNED) reserved = RX_BUF_ALIGN; + else if (rx_align == WLCORE_RX_BUF_PADDED) + offset_to_data = RX_BUF_ALIGN; /* the data read starts with the descriptor */ desc = (struct wl1271_rx_descriptor *) data; @@ -139,19 +146,15 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, return 0; } - switch (desc->status & WL1271_RX_DESC_STATUS_MASK) { /* discard corrupted packets */ - case WL1271_RX_DESC_DRIVER_RX_Q_FAIL: - case WL1271_RX_DESC_DECRYPT_FAIL: - wl1271_warning("corrupted packet in RX with status: 0x%x", - desc->status & WL1271_RX_DESC_STATUS_MASK); - return -EINVAL; - case WL1271_RX_DESC_SUCCESS: - case WL1271_RX_DESC_MIC_FAIL: - break; - default: - wl1271_error("invalid RX descriptor status: 0x%x", - desc->status & WL1271_RX_DESC_STATUS_MASK); + if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) { + hdr = (void *)(data + sizeof(*desc) + offset_to_data); + wl1271_warning("corrupted packet in RX: status: 0x%x len: %d", + desc->status & WL1271_RX_DESC_STATUS_MASK, + pkt_data_len); + wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc), + min(pkt_data_len, + ieee80211_hdrlen(hdr->frame_control))); return -EINVAL; } diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h index 71eba1899915..3363f60fb7da 100644 --- a/drivers/net/wireless/ti/wlcore/rx.h +++ b/drivers/net/wireless/ti/wlcore/rx.h @@ -84,12 +84,11 @@ * Bits 3-5 - process_id tag (AP mode FW) * Bits 6-7 - reserved */ -#define WL1271_RX_DESC_STATUS_MASK 0x03 +#define WL1271_RX_DESC_STATUS_MASK 0x07 #define WL1271_RX_DESC_SUCCESS 0x00 #define WL1271_RX_DESC_DECRYPT_FAIL 0x01 #define WL1271_RX_DESC_MIC_FAIL 0x02 -#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03 #define RX_MEM_BLOCK_MASK 0xFF #define RX_BUF_SIZE_MASK 0xFFF00 diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index d00501493dfe..f407101e525b 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -35,7 +35,6 @@ void wl1271_scan_complete_work(struct work_struct *work) { struct delayed_work *dwork; struct wl1271 *wl; - struct ieee80211_vif *vif; struct wl12xx_vif *wlvif; int ret; @@ -52,8 +51,7 @@ void wl1271_scan_complete_work(struct work_struct *work) if (wl->scan.state == WL1271_SCAN_STATE_IDLE) goto out; - vif = wl->scan_vif; - wlvif = wl12xx_vif_to_data(vif); + wlvif = wl->scan_wlvif; /* * Rearm the tx watchdog just before idling scan. 
This @@ -64,7 +62,7 @@ void wl1271_scan_complete_work(struct work_struct *work) wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); wl->scan.req = NULL; - wl->scan_vif = NULL; + wl->scan_wlvif = NULL; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) @@ -82,6 +80,8 @@ void wl1271_scan_complete_work(struct work_struct *work) wl12xx_queue_recovery_work(wl); } + wlcore_cmd_regdomain_config_locked(wl); + ieee80211_scan_completed(wl->hw, false); out: @@ -89,371 +89,99 @@ out: } - -static int wl1271_get_scan_channels(struct wl1271 *wl, - struct cfg80211_scan_request *req, - struct basic_scan_channel_params *channels, - enum ieee80211_band band, bool passive) -{ - struct conf_scan_settings *c = &wl->conf.scan; - int i, j; - u32 flags; - - for (i = 0, j = 0; - i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS; - i++) { - flags = req->channels[i]->flags; - - if (!test_bit(i, wl->scan.scanned_ch) && - !(flags & IEEE80211_CHAN_DISABLED) && - (req->channels[i]->band == band) && - /* - * In passive scans, we scan all remaining - * channels, even if not marked as such. - * In active scans, we only scan channels not - * marked as passive. - */ - (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) { - wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", - req->channels[i]->band, - req->channels[i]->center_freq); - wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X", - req->channels[i]->hw_value, - req->channels[i]->flags); - wl1271_debug(DEBUG_SCAN, - "max_antenna_gain %d, max_power %d", - req->channels[i]->max_antenna_gain, - req->channels[i]->max_power); - wl1271_debug(DEBUG_SCAN, "beacon_found %d", - req->channels[i]->beacon_found); - - if (!passive) { - channels[j].min_duration = - cpu_to_le32(c->min_dwell_time_active); - channels[j].max_duration = - cpu_to_le32(c->max_dwell_time_active); - } else { - channels[j].min_duration = - cpu_to_le32(c->min_dwell_time_passive); - channels[j].max_duration = - cpu_to_le32(c->max_dwell_time_passive); - } - channels[j].early_termination = 0; - channels[j].tx_power_att = req->channels[i]->max_power; - channels[j].channel = req->channels[i]->hw_value; - - memset(&channels[j].bssid_lsb, 0xff, 4); - memset(&channels[j].bssid_msb, 0xff, 2); - - /* Mark the channels we already used */ - set_bit(i, wl->scan.scanned_ch); - - j++; - } - } - - return j; -} - -#define WL1271_NOTHING_TO_SCAN 1 - -static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif, - enum ieee80211_band band, - bool passive, u32 basic_rate) +static void wlcore_started_vifs_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - struct wl1271_cmd_scan *cmd; - struct wl1271_cmd_trigger_scan_to *trigger; - int ret; - u16 scan_options = 0; - - /* skip active scans if we don't have SSIDs */ - if (!passive && wl->scan.req->n_ssids == 0) - return WL1271_NOTHING_TO_SCAN; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); - if (!cmd || !trigger) { - ret = -ENOMEM; - goto out; - } - - if (wl->conf.scan.split_scan_timeout) - scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN; - - if (passive) - scan_options |= WL1271_SCAN_OPT_PASSIVE; - - cmd->params.role_id = wlvif->role_id; - - if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) { - ret = -EINVAL; - goto out; - } - - cmd->params.scan_options = cpu_to_le16(scan_options); - - cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, - cmd->channels, - band, passive); - if (cmd->params.n_ch == 0) { - 
ret = WL1271_NOTHING_TO_SCAN; - goto out; - } - - cmd->params.tx_rate = cpu_to_le32(basic_rate); - cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs; - cmd->params.tid_trigger = CONF_TX_AC_ANY_TID; - cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; - - if (band == IEEE80211_BAND_2GHZ) - cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ; - else - cmd->params.band = WL1271_SCAN_BAND_5_GHZ; - - if (wl->scan.ssid_len && wl->scan.ssid) { - cmd->params.ssid_len = wl->scan.ssid_len; - memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); - } - - memcpy(cmd->addr, vif->addr, ETH_ALEN); - - ret = wl12xx_cmd_build_probe_req(wl, wlvif, - cmd->params.role_id, band, - wl->scan.ssid, wl->scan.ssid_len, - wl->scan.req->ie, - wl->scan.req->ie_len, false); - if (ret < 0) { - wl1271_error("PROBE request template failed"); - goto out; - } - - trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout); - ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, - sizeof(*trigger), 0); - if (ret < 0) { - wl1271_error("trigger scan to failed for hw scan"); - goto out; - } - - wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd)); + int *count = (int *)data; - ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0); - if (ret < 0) { - wl1271_error("SCAN failed"); - goto out; - } - -out: - kfree(cmd); - kfree(trigger); - return ret; + if (!vif->bss_conf.idle) + (*count)++; } -void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif) +static int wlcore_count_started_vifs(struct wl1271 *wl) { - struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); - int ret = 0; - enum ieee80211_band band; - u32 rate, mask; - - switch (wl->scan.state) { - case WL1271_SCAN_STATE_IDLE: - break; - - case WL1271_SCAN_STATE_2GHZ_ACTIVE: - band = IEEE80211_BAND_2GHZ; - mask = wlvif->bitrate_masks[band]; - if (wl->scan.req->no_cck) { - mask &= ~CONF_TX_CCK_RATES; - if (!mask) - mask = CONF_TX_RATE_MASK_BASIC_P2P; - } - rate = wl1271_tx_min_rate_get(wl, mask); - ret = wl1271_scan_send(wl, vif, band, false, rate); - if (ret == WL1271_NOTHING_TO_SCAN) { - wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE; - wl1271_scan_stm(wl, vif); - } - - break; - - case WL1271_SCAN_STATE_2GHZ_PASSIVE: - band = IEEE80211_BAND_2GHZ; - mask = wlvif->bitrate_masks[band]; - if (wl->scan.req->no_cck) { - mask &= ~CONF_TX_CCK_RATES; - if (!mask) - mask = CONF_TX_RATE_MASK_BASIC_P2P; - } - rate = wl1271_tx_min_rate_get(wl, mask); - ret = wl1271_scan_send(wl, vif, band, true, rate); - if (ret == WL1271_NOTHING_TO_SCAN) { - if (wl->enable_11a) - wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; - else - wl->scan.state = WL1271_SCAN_STATE_DONE; - wl1271_scan_stm(wl, vif); - } - - break; + int count = 0; - case WL1271_SCAN_STATE_5GHZ_ACTIVE: - band = IEEE80211_BAND_5GHZ; - rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); - ret = wl1271_scan_send(wl, vif, band, false, rate); - if (ret == WL1271_NOTHING_TO_SCAN) { - wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE; - wl1271_scan_stm(wl, vif); - } - - break; - - case WL1271_SCAN_STATE_5GHZ_PASSIVE: - band = IEEE80211_BAND_5GHZ; - rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); - ret = wl1271_scan_send(wl, vif, band, true, rate); - if (ret == WL1271_NOTHING_TO_SCAN) { - wl->scan.state = WL1271_SCAN_STATE_DONE; - wl1271_scan_stm(wl, vif); - } - - break; - - case WL1271_SCAN_STATE_DONE: - wl->scan.failed = false; - cancel_delayed_work(&wl->scan_complete_work); - ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, - msecs_to_jiffies(0)); - break; - - default: - 
wl1271_error("invalid scan state"); - break; - } - - if (ret < 0) { - cancel_delayed_work(&wl->scan_complete_work); - ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, - msecs_to_jiffies(0)); - } -} - -int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, - const u8 *ssid, size_t ssid_len, - struct cfg80211_scan_request *req) -{ - /* - * cfg80211 should guarantee that we don't get more channels - * than what we have registered. - */ - BUG_ON(req->n_channels > WL1271_MAX_CHANNELS); - - if (wl->scan.state != WL1271_SCAN_STATE_IDLE) - return -EBUSY; - - wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE; - - if (ssid_len && ssid) { - wl->scan.ssid_len = ssid_len; - memcpy(wl->scan.ssid, ssid, ssid_len); - } else { - wl->scan.ssid_len = 0; - } - - wl->scan_vif = vif; - wl->scan.req = req; - memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); - - /* we assume failure so that timeout scenarios are handled correctly */ - wl->scan.failed = true; - ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, - msecs_to_jiffies(WL1271_SCAN_TIMEOUT)); - - wl1271_scan_stm(wl, vif); - - return 0; -} - -int wl1271_scan_stop(struct wl1271 *wl) -{ - struct wl1271_cmd_header *cmd = NULL; - int ret = 0; - - if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE)) - return -EINVAL; - - wl1271_debug(DEBUG_CMD, "cmd scan stop"); - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) { - ret = -ENOMEM; - goto out; - } - - ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd, - sizeof(*cmd), 0); - if (ret < 0) { - wl1271_error("cmd stop_scan failed"); - goto out; - } -out: - kfree(cmd); - return ret; + ieee80211_iterate_active_interfaces_atomic(wl->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + wlcore_started_vifs_iter, &count); + return count; } static int -wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, - struct cfg80211_sched_scan_request *req, - struct conn_scan_ch_params *channels, - u32 band, bool radar, bool passive, - int start, int max_channels, - u8 *n_pactive_ch) +wlcore_scan_get_channels(struct wl1271 *wl, + struct ieee80211_channel *req_channels[], + u32 n_channels, + u32 n_ssids, + struct conn_scan_ch_params *channels, + u32 band, bool radar, bool passive, + int start, int max_channels, + u8 *n_pactive_ch, + int scan_type) { - struct conf_sched_scan_settings *c = &wl->conf.sched_scan; int i, j; u32 flags; - bool force_passive = !req->n_ssids; - u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe; + bool force_passive = !n_ssids; + u32 min_dwell_time_active, max_dwell_time_active; u32 dwell_time_passive, dwell_time_dfs; - if (band == IEEE80211_BAND_5GHZ) - delta_per_probe = c->dwell_time_delta_per_probe_5; - else - delta_per_probe = c->dwell_time_delta_per_probe; + /* configure dwell times according to scan type */ + if (scan_type == SCAN_TYPE_SEARCH) { + struct conf_scan_settings *c = &wl->conf.scan; + bool active_vif_exists = !!wlcore_count_started_vifs(wl); + + min_dwell_time_active = active_vif_exists ? + c->min_dwell_time_active : + c->min_dwell_time_active_long; + max_dwell_time_active = active_vif_exists ? 
+ c->max_dwell_time_active : + c->max_dwell_time_active_long; + dwell_time_passive = c->dwell_time_passive; + dwell_time_dfs = c->dwell_time_dfs; + } else { + struct conf_sched_scan_settings *c = &wl->conf.sched_scan; + u32 delta_per_probe; - min_dwell_time_active = c->base_dwell_time + - req->n_ssids * c->num_probe_reqs * delta_per_probe; + if (band == IEEE80211_BAND_5GHZ) + delta_per_probe = c->dwell_time_delta_per_probe_5; + else + delta_per_probe = c->dwell_time_delta_per_probe; - max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta; + min_dwell_time_active = c->base_dwell_time + + n_ssids * c->num_probe_reqs * delta_per_probe; + max_dwell_time_active = min_dwell_time_active + + c->max_dwell_time_delta; + dwell_time_passive = c->dwell_time_passive; + dwell_time_dfs = c->dwell_time_dfs; + } min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000); max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000); - dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000); - dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000); + dwell_time_passive = DIV_ROUND_UP(dwell_time_passive, 1000); + dwell_time_dfs = DIV_ROUND_UP(dwell_time_dfs, 1000); for (i = 0, j = start; - i < req->n_channels && j < max_channels; + i < n_channels && j < max_channels; i++) { - flags = req->channels[i]->flags; + flags = req_channels[i]->flags; if (force_passive) flags |= IEEE80211_CHAN_PASSIVE_SCAN; - if ((req->channels[i]->band == band) && + if ((req_channels[i]->band == band) && !(flags & IEEE80211_CHAN_DISABLED) && (!!(flags & IEEE80211_CHAN_RADAR) == radar) && /* if radar is set, we ignore the passive flag */ (radar || !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) { wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", - req->channels[i]->band, - req->channels[i]->center_freq); + req_channels[i]->band, + req_channels[i]->center_freq); wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X", - req->channels[i]->hw_value, - req->channels[i]->flags); + req_channels[i]->hw_value, + req_channels[i]->flags); wl1271_debug(DEBUG_SCAN, "max_power %d", - req->channels[i]->max_power); + req_channels[i]->max_power); wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d", min_dwell_time_active, max_dwell_time_active); @@ -473,10 +201,11 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, channels[j].max_duration = cpu_to_le16(max_dwell_time_active); - channels[j].tx_power_att = req->channels[i]->max_power; - channels[j].channel = req->channels[i]->hw_value; + channels[j].tx_power_att = req_channels[i]->max_power; + channels[j].channel = req_channels[i]->hw_value; - if ((band == IEEE80211_BAND_2GHZ) && + if (n_pactive_ch && + (band == IEEE80211_BAND_2GHZ) && (channels[j].channel >= 12) && (channels[j].channel <= 14) && (flags & IEEE80211_CHAN_PASSIVE_SCAN) && @@ -500,51 +229,80 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, return j - start; } -static bool -wl1271_scan_sched_scan_channels(struct wl1271 *wl, - struct cfg80211_sched_scan_request *req, - struct wl1271_cmd_sched_scan_config *cfg) +bool +wlcore_set_scan_chan_params(struct wl1271 *wl, + struct wlcore_scan_channels *cfg, + struct ieee80211_channel *channels[], + u32 n_channels, + u32 n_ssids, + int scan_type) { u8 n_pactive_ch = 0; cfg->passive[0] = - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, - IEEE80211_BAND_2GHZ, - false, true, 0, - MAX_CHANNELS_2GHZ, - &n_pactive_ch); + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_2, + IEEE80211_BAND_2GHZ, + 
false, true, 0, + MAX_CHANNELS_2GHZ, + &n_pactive_ch, + scan_type); cfg->active[0] = - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, - IEEE80211_BAND_2GHZ, - false, false, - cfg->passive[0], - MAX_CHANNELS_2GHZ, - &n_pactive_ch); + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_2, + IEEE80211_BAND_2GHZ, + false, false, + cfg->passive[0], + MAX_CHANNELS_2GHZ, + &n_pactive_ch, + scan_type); cfg->passive[1] = - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, - IEEE80211_BAND_5GHZ, - false, true, 0, - MAX_CHANNELS_5GHZ, - &n_pactive_ch); + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_5, + IEEE80211_BAND_5GHZ, + false, true, 0, + wl->max_channels_5, + &n_pactive_ch, + scan_type); cfg->dfs = - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, - IEEE80211_BAND_5GHZ, - true, true, - cfg->passive[1], - MAX_CHANNELS_5GHZ, - &n_pactive_ch); + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_5, + IEEE80211_BAND_5GHZ, + true, true, + cfg->passive[1], + wl->max_channels_5, + &n_pactive_ch, + scan_type); cfg->active[1] = - wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, - IEEE80211_BAND_5GHZ, - false, false, - cfg->passive[1] + cfg->dfs, - MAX_CHANNELS_5GHZ, - &n_pactive_ch); + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_5, + IEEE80211_BAND_5GHZ, + false, false, + cfg->passive[1] + cfg->dfs, + wl->max_channels_5, + &n_pactive_ch, + scan_type); + /* 802.11j channels are not supported yet */ cfg->passive[2] = 0; cfg->active[2] = 0; - cfg->n_pactive_ch = n_pactive_ch; + cfg->passive_active = n_pactive_ch; wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", cfg->active[0], cfg->passive[0]); @@ -556,10 +314,48 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl, cfg->passive[1] || cfg->active[1] || cfg->dfs || cfg->passive[2] || cfg->active[2]; } +EXPORT_SYMBOL_GPL(wlcore_set_scan_chan_params); + +int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, + struct cfg80211_scan_request *req) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + + /* + * cfg80211 should guarantee that we don't get more channels + * than what we have registered. 
+ */ + BUG_ON(req->n_channels > WL1271_MAX_CHANNELS); + + if (wl->scan.state != WL1271_SCAN_STATE_IDLE) + return -EBUSY; + + wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE; + + if (ssid_len && ssid) { + wl->scan.ssid_len = ssid_len; + memcpy(wl->scan.ssid, ssid, ssid_len); + } else { + wl->scan.ssid_len = 0; + } + + wl->scan_wlvif = wlvif; + wl->scan.req = req; + memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); + + /* we assume failure so that timeout scenarios are handled correctly */ + wl->scan.failed = true; + ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, + msecs_to_jiffies(WL1271_SCAN_TIMEOUT)); + wl->ops->scan_start(wl, wlvif, req); + + return 0; +} /* Returns the scan type to be used or a negative value on error */ -static int -wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl, +int +wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req) { @@ -662,160 +458,12 @@ out: return ret; return type; } +EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_ssid_list); -int wl1271_scan_sched_scan_config(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_sched_scan_ies *ies) -{ - struct wl1271_cmd_sched_scan_config *cfg = NULL; - struct conf_sched_scan_settings *c = &wl->conf.sched_scan; - int i, ret; - bool force_passive = !req->n_ssids; - - wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); - - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) - return -ENOMEM; - - cfg->role_id = wlvif->role_id; - cfg->rssi_threshold = c->rssi_threshold; - cfg->snr_threshold = c->snr_threshold; - cfg->n_probe_reqs = c->num_probe_reqs; - /* cycles set to 0 it means infinite (until manually stopped) */ - cfg->cycles = 0; - /* report APs when at least 1 is found */ - cfg->report_after = 1; - /* don't stop scanning automatically when something is found */ - cfg->terminate = 0; - cfg->tag = WL1271_SCAN_DEFAULT_TAG; - /* don't filter on BSS type */ - cfg->bss_type = SCAN_BSS_TYPE_ANY; - /* currently NL80211 supports only a single interval */ - for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++) - cfg->intervals[i] = cpu_to_le32(req->interval); - - cfg->ssid_len = 0; - ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req); - if (ret < 0) - goto out; - - cfg->filter_type = ret; - - wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type); - - if (!wl1271_scan_sched_scan_channels(wl, req, cfg)) { - wl1271_error("scan channel list is empty"); - ret = -EINVAL; - goto out; - } - - if (!force_passive && cfg->active[0]) { - u8 band = IEEE80211_BAND_2GHZ; - ret = wl12xx_cmd_build_probe_req(wl, wlvif, - wlvif->role_id, band, - req->ssids[0].ssid, - req->ssids[0].ssid_len, - ies->ie[band], - ies->len[band], true); - if (ret < 0) { - wl1271_error("2.4GHz PROBE request template failed"); - goto out; - } - } - - if (!force_passive && cfg->active[1]) { - u8 band = IEEE80211_BAND_5GHZ; - ret = wl12xx_cmd_build_probe_req(wl, wlvif, - wlvif->role_id, band, - req->ssids[0].ssid, - req->ssids[0].ssid_len, - ies->ie[band], - ies->len[band], true); - if (ret < 0) { - wl1271_error("5GHz PROBE request template failed"); - goto out; - } - } - - wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg)); - - ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg, - sizeof(*cfg), 0); - if (ret < 0) { - wl1271_error("SCAN configuration failed"); - goto out; - } -out: - kfree(cfg); - return ret; -} - -int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif) -{ - struct 
wl1271_cmd_sched_scan_start *start; - int ret = 0; - - wl1271_debug(DEBUG_CMD, "cmd periodic scan start"); - - if (wlvif->bss_type != BSS_TYPE_STA_BSS) - return -EOPNOTSUPP; - - if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) && - test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) - return -EBUSY; - - start = kzalloc(sizeof(*start), GFP_KERNEL); - if (!start) - return -ENOMEM; - - start->role_id = wlvif->role_id; - start->tag = WL1271_SCAN_DEFAULT_TAG; - - ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, - sizeof(*start), 0); - if (ret < 0) { - wl1271_error("failed to send scan start command"); - goto out_free; - } - -out_free: - kfree(start); - return ret; -} - -void wl1271_scan_sched_scan_results(struct wl1271 *wl) +void wlcore_scan_sched_scan_results(struct wl1271 *wl) { wl1271_debug(DEBUG_SCAN, "got periodic scan results"); ieee80211_sched_scan_results(wl->hw); } - -void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif) -{ - struct wl1271_cmd_sched_scan_stop *stop; - int ret = 0; - - wl1271_debug(DEBUG_CMD, "cmd periodic scan stop"); - - /* FIXME: what to do if alloc'ing to stop fails? */ - stop = kzalloc(sizeof(*stop), GFP_KERNEL); - if (!stop) { - wl1271_error("failed to alloc memory to send sched scan stop"); - return; - } - - stop->role_id = wlvif->role_id; - stop->tag = WL1271_SCAN_DEFAULT_TAG; - - ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop, - sizeof(*stop), 0); - if (ret < 0) { - wl1271_error("failed to send sched scan stop command"); - goto out_free; - } - -out_free: - kfree(stop); -} +EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_results); diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h index 29f3c8d6b046..a6ab24b5c0f9 100644 --- a/drivers/net/wireless/ti/wlcore/scan.h +++ b/drivers/net/wireless/ti/wlcore/scan.h @@ -26,22 +26,20 @@ #include "wlcore.h" -int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, +int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif, const u8 *ssid, size_t ssid_len, struct cfg80211_scan_request *req); -int wl1271_scan_stop(struct wl1271 *wl); int wl1271_scan_build_probe_req(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u8 band); -void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif); +void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif); void wl1271_scan_complete_work(struct work_struct *work); int wl1271_scan_sched_scan_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_sched_scan_ies *ies); int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif); -void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif); -void wl1271_scan_sched_scan_results(struct wl1271 *wl); +void wlcore_scan_sched_scan_results(struct wl1271 *wl); #define WL1271_SCAN_MAX_CHANNELS 24 #define WL1271_SCAN_DEFAULT_TAG 1 @@ -66,56 +64,6 @@ enum { WL1271_SCAN_STATE_DONE }; -struct basic_scan_params { - /* Scan option flags (WL1271_SCAN_OPT_*) */ - __le16 scan_options; - u8 role_id; - /* Number of scan channels in the list (maximum 30) */ - u8 n_ch; - /* This field indicates the number of probe requests to send - per channel for an active scan */ - u8 n_probe_reqs; - u8 tid_trigger; - u8 ssid_len; - u8 use_ssid_list; - - /* Rate bit field for sending the probes */ - __le32 tx_rate; - - u8 ssid[IEEE80211_MAX_SSID_LEN]; - /* Band to scan */ - u8 band; - - u8 scan_tag; - u8 padding2[2]; -} __packed; - -struct 
basic_scan_channel_params { - /* Duration in TU to wait for frames on a channel for active scan */ - __le32 min_duration; - __le32 max_duration; - __le32 bssid_lsb; - __le16 bssid_msb; - u8 early_termination; - u8 tx_power_att; - u8 channel; - /* FW internal use only! */ - u8 dfs_candidate; - u8 activity_detected; - u8 pad; -} __packed; - -struct wl1271_cmd_scan { - struct wl1271_cmd_header header; - - struct basic_scan_params params; - struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; - - /* src mac address */ - u8 addr[ETH_ALEN]; - u8 padding[2]; -} __packed; - struct wl1271_cmd_trigger_scan_to { struct wl1271_cmd_header header; @@ -123,9 +71,17 @@ struct wl1271_cmd_trigger_scan_to { } __packed; #define MAX_CHANNELS_2GHZ 14 -#define MAX_CHANNELS_5GHZ 23 #define MAX_CHANNELS_4GHZ 4 +/* + * This max value here is used only for the struct definition of + * wlcore_scan_channels. This struct is used by both 12xx + * and 18xx (which have different max 5ghz channels value). + * In order to make sure this is large enough, just use the + * max possible 5ghz channels. + */ +#define MAX_CHANNELS_5GHZ 42 + #define SCAN_MAX_CYCLE_INTERVALS 16 #define SCAN_MAX_BANDS 3 @@ -160,43 +116,6 @@ struct conn_scan_ch_params { u8 padding[3]; } __packed; -struct wl1271_cmd_sched_scan_config { - struct wl1271_cmd_header header; - - __le32 intervals[SCAN_MAX_CYCLE_INTERVALS]; - - s8 rssi_threshold; /* for filtering (in dBm) */ - s8 snr_threshold; /* for filtering (in dB) */ - - u8 cycles; /* maximum number of scan cycles */ - u8 report_after; /* report when this number of results are received */ - u8 terminate; /* stop scanning after reporting */ - - u8 tag; - u8 bss_type; /* for filtering */ - u8 filter_type; - - u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */ - u8 ssid[IEEE80211_MAX_SSID_LEN]; - - u8 n_probe_reqs; /* Number of probes requests per channel */ - - u8 passive[SCAN_MAX_BANDS]; - u8 active[SCAN_MAX_BANDS]; - - u8 dfs; - - u8 n_pactive_ch; /* number of pactive (passive until fw detects energy) - channels in BG band */ - u8 role_id; - u8 padding[1]; - - struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; - struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ]; - struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ]; -} __packed; - - #define SCHED_SCAN_MAX_SSIDS 16 enum { @@ -220,21 +139,34 @@ struct wl1271_cmd_sched_scan_ssid_list { u8 padding[2]; } __packed; -struct wl1271_cmd_sched_scan_start { - struct wl1271_cmd_header header; +struct wlcore_scan_channels { + u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */ + u8 active[SCAN_MAX_BANDS]; /* number of active scan channels */ + u8 dfs; /* number of dfs channels in 5ghz */ + u8 passive_active; /* number of passive before active channels 2.4ghz */ - u8 tag; - u8 role_id; - u8 padding[2]; -} __packed; - -struct wl1271_cmd_sched_scan_stop { - struct wl1271_cmd_header header; + struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; + struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ]; + struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ]; +}; - u8 tag; - u8 role_id; - u8 padding[2]; -} __packed; +enum { + SCAN_TYPE_SEARCH = 0, + SCAN_TYPE_PERIODIC = 1, + SCAN_TYPE_TRACKING = 2, +}; +bool +wlcore_set_scan_chan_params(struct wl1271 *wl, + struct wlcore_scan_channels *cfg, + struct ieee80211_channel *channels[], + u32 n_channels, + u32 n_ssids, + int scan_type); + +int +wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req); #endif 
/* __WL1271_SCAN_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 646f703ae739..29ef2492951f 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = { static int wl1271_probe(struct sdio_func *func, const struct sdio_device_id *id) { - struct wl12xx_platform_data *wlan_data; + struct wlcore_platdev_data *pdev_data; struct wl12xx_sdio_glue *glue; struct resource res[1]; mmc_pm_flag_t mmcflags; @@ -228,10 +228,16 @@ static int wl1271_probe(struct sdio_func *func, if (func->num != 0x02) return -ENODEV; + pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL); + if (!pdev_data) + goto out; + + pdev_data->if_ops = &sdio_ops; + glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&func->dev, "can't allocate glue\n"); - goto out; + goto out_free_pdev_data; } glue->dev = &func->dev; @@ -242,9 +248,9 @@ static int wl1271_probe(struct sdio_func *func, /* Use block mode for transferring over one block size of data */ func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; - wlan_data = wl12xx_get_platform_data(); - if (IS_ERR(wlan_data)) { - ret = PTR_ERR(wlan_data); + pdev_data->pdata = wl12xx_get_platform_data(); + if (IS_ERR(pdev_data->pdata)) { + ret = PTR_ERR(pdev_data->pdata); dev_err(glue->dev, "missing wlan platform data: %d\n", ret); goto out_free_glue; } @@ -254,9 +260,7 @@ static int wl1271_probe(struct sdio_func *func, dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); if (mmcflags & MMC_PM_KEEP_POWER) - wlan_data->pwr_in_suspend = true; - - wlan_data->ops = &sdio_ops; + pdev_data->pdata->pwr_in_suspend = true; sdio_set_drvdata(func, glue); @@ -274,7 +278,7 @@ static int wl1271_probe(struct sdio_func *func, else chip_family = "wl12xx"; - glue->core = platform_device_alloc(chip_family, -1); + glue->core = platform_device_alloc(chip_family, PLATFORM_DEVID_AUTO); if (!glue->core) { dev_err(glue->dev, "can't allocate platform_device"); ret = -ENOMEM; @@ -285,7 +289,7 @@ static int wl1271_probe(struct sdio_func *func, memset(res, 0x00, sizeof(res)); - res[0].start = wlan_data->irq; + res[0].start = pdev_data->pdata->irq; res[0].flags = IORESOURCE_IRQ; res[0].name = "irq"; @@ -295,8 +299,8 @@ static int wl1271_probe(struct sdio_func *func, goto out_dev_put; } - ret = platform_device_add_data(glue->core, wlan_data, - sizeof(*wlan_data)); + ret = platform_device_add_data(glue->core, pdev_data, + sizeof(*pdev_data)); if (ret) { dev_err(glue->dev, "can't add platform data\n"); goto out_dev_put; @@ -315,6 +319,9 @@ out_dev_put: out_free_glue: kfree(glue); +out_free_pdev_data: + kfree(pdev_data); + out: return ret; } @@ -326,8 +333,7 @@ static void wl1271_remove(struct sdio_func *func) /* Undo decrement done above in wl1271_probe */ pm_runtime_get_noresume(&func->dev); - platform_device_del(glue->core); - platform_device_put(glue->core); + platform_device_unregister(glue->core); kfree(glue); } diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index f06f4770ce02..e26447832683 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -270,7 +270,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, void *buf, size_t len, bool fixed) { struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); - struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; + struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)]; struct spi_message m; u32 
commands[WSPI_MAX_NUM_OF_CHUNKS]; u32 *cmd; @@ -327,22 +327,27 @@ static struct wl1271_if_operations spi_ops = { static int wl1271_probe(struct spi_device *spi) { struct wl12xx_spi_glue *glue; - struct wl12xx_platform_data *pdata; + struct wlcore_platdev_data *pdev_data; struct resource res[1]; int ret = -ENOMEM; - pdata = spi->dev.platform_data; - if (!pdata) { + pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL); + if (!pdev_data) + goto out; + + pdev_data->pdata = spi->dev.platform_data; + if (!pdev_data->pdata) { dev_err(&spi->dev, "no platform data\n"); - return -ENODEV; + ret = -ENODEV; + goto out_free_pdev_data; } - pdata->ops = &spi_ops; + pdev_data->if_ops = &spi_ops; glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&spi->dev, "can't allocate glue\n"); - goto out; + goto out_free_pdev_data; } glue->dev = &spi->dev; @@ -359,7 +364,7 @@ static int wl1271_probe(struct spi_device *spi) goto out_free_glue; } - glue->core = platform_device_alloc("wl12xx", -1); + glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO); if (!glue->core) { dev_err(glue->dev, "can't allocate platform_device\n"); ret = -ENOMEM; @@ -380,7 +385,8 @@ static int wl1271_probe(struct spi_device *spi) goto out_dev_put; } - ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata)); + ret = platform_device_add_data(glue->core, pdev_data, + sizeof(*pdev_data)); if (ret) { dev_err(glue->dev, "can't add platform data\n"); goto out_dev_put; @@ -399,6 +405,10 @@ out_dev_put: out_free_glue: kfree(glue); + +out_free_pdev_data: + kfree(pdev_data); + out: return ret; } @@ -407,8 +417,7 @@ static int wl1271_remove(struct spi_device *spi) { struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); - platform_device_del(glue->core); - platform_device_put(glue->core); + platform_device_unregister(glue->core); kfree(glue); return 0; diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index a90d3cd09408..ece392c54d9c 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -104,7 +104,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) { - bool fw_ps, single_sta; + bool fw_ps, single_link; u8 tx_pkts; if (WARN_ON(!test_bit(hlid, wlvif->links_map))) @@ -112,15 +112,15 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); tx_pkts = wl->links[hlid].allocated_pkts; - single_sta = (wl->active_sta_count == 1); + single_link = (wl->active_link_count == 1); /* * if in FW PS and there is enough data in FW we can put the link * into high-level PS and clean out its TX queues. - * Make an exception if this is the only connected station. In this - * case FW-memory congestion is not a problem. + * Make an exception if this is the only connected link. In this + * case FW-memory congestion is less of a problem. 
*/ - if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) + if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) wl12xx_ps_link_start(wl, wlvif, hlid, true); } @@ -155,21 +155,18 @@ static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct sk_buff *skb, struct ieee80211_sta *sta) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - - if (!wlvif || wl12xx_is_dummy_packet(wl, skb)) - return wl->system_hlid; + struct ieee80211_tx_info *control; if (wlvif->bss_type == BSS_TYPE_AP_BSS) return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta); - if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || - test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && - !ieee80211_is_auth(hdr->frame_control) && - !ieee80211_is_assoc_req(hdr->frame_control)) - return wlvif->sta.hlid; - else + control = IEEE80211_SKB_CB(skb); + if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + wl1271_debug(DEBUG_TX, "tx offchannel"); return wlvif->dev_hlid; + } + + return wlvif->sta.hlid; } unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, @@ -224,9 +221,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); wl->tx_allocated_pkts[ac]++; - if (!wl12xx_is_dummy_packet(wl, skb) && wlvif && - wlvif->bss_type == BSS_TYPE_AP_BSS && - test_bit(hlid, wlvif->ap.sta_hlid_map)) + if (test_bit(hlid, wl->links_map)) wl->links[hlid].allocated_pkts++; ret = 0; @@ -293,9 +288,14 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ; } else if (wlvif) { + u8 session_id = wl->session_ids[hlid]; + + if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) && + (wlvif->bss_type == BSS_TYPE_AP_BSS)) + session_id = 0; + /* configure the tx attributes */ - tx_attr = wlvif->session_counter << - TX_HW_ATTR_OFST_SESSION_COUNTER; + tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER; } desc->hlid = hlid; @@ -452,20 +452,22 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, void wl1271_handle_tx_low_watermark(struct wl1271 *wl) { int i; + struct wl12xx_vif *wlvif; - for (i = 0; i < NUM_TX_QUEUES; i++) { - if (wlcore_is_queue_stopped_by_reason(wl, i, - WLCORE_QUEUE_STOP_REASON_WATERMARK) && - wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) { - /* firmware buffer has space, restart queues */ - wlcore_wake_queue(wl, i, - WLCORE_QUEUE_STOP_REASON_WATERMARK); + wl12xx_for_each_wlvif(wl, wlvif) { + for (i = 0; i < NUM_TX_QUEUES; i++) { + if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i, + WLCORE_QUEUE_STOP_REASON_WATERMARK) && + wlvif->tx_queue_count[i] <= + WL1271_TX_QUEUE_LOW_WATERMARK) + /* firmware buffer has space, restart queues */ + wlcore_wake_queue(wl, wlvif, i, + WLCORE_QUEUE_STOP_REASON_WATERMARK); } } } -static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, - struct sk_buff_head *queues) +static int wlcore_select_ac(struct wl1271 *wl) { int i, q = -1, ac; u32 min_pkts = 0xffffffff; @@ -479,45 +481,60 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, */ for (i = 0; i < NUM_TX_QUEUES; i++) { ac = wl1271_tx_get_queue(i); - if (!skb_queue_empty(&queues[ac]) && - (wl->tx_allocated_pkts[ac] < min_pkts)) { + if (wl->tx_queue_count[ac] && + wl->tx_allocated_pkts[ac] < min_pkts) { q = ac; min_pkts = wl->tx_allocated_pkts[q]; } } - if (q == -1) - return NULL; - - return &queues[q]; + return q; } -static struct sk_buff 
*wl12xx_lnk_skb_dequeue(struct wl1271 *wl, - struct wl1271_link *lnk) +static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl, + struct wl1271_link *lnk, u8 q) { struct sk_buff *skb; unsigned long flags; - struct sk_buff_head *queue; - queue = wl1271_select_queue(wl, lnk->tx_queue); - if (!queue) - return NULL; - - skb = skb_dequeue(queue); + skb = skb_dequeue(&lnk->tx_queue[q]); if (skb) { - int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); spin_lock_irqsave(&wl->wl_lock, flags); WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); wl->tx_queue_count[q]--; + if (lnk->wlvif) { + WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0); + lnk->wlvif->tx_queue_count[q]--; + } spin_unlock_irqrestore(&wl->wl_lock, flags); } return skb; } -static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, - struct wl12xx_vif *wlvif, - u8 *hlid) +static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl, + u8 hlid, u8 ac, + u8 *low_prio_hlid) +{ + struct wl1271_link *lnk = &wl->links[hlid]; + + if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) { + if (*low_prio_hlid == WL12XX_INVALID_LINK_ID && + !skb_queue_empty(&lnk->tx_queue[ac]) && + wlcore_hw_lnk_low_prio(wl, hlid, lnk)) + /* we found the first non-empty low priority queue */ + *low_prio_hlid = hlid; + + return NULL; + } + + return wlcore_lnk_dequeue(wl, lnk, ac); +} + +static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 ac, u8 *hlid, + u8 *low_prio_hlid) { struct sk_buff *skb = NULL; int i, h, start_hlid; @@ -533,7 +550,8 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, if (!test_bit(h, wlvif->links_map)) continue; - skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]); + skb = wlcore_lnk_dequeue_high_prio(wl, h, ac, + low_prio_hlid); if (!skb) continue; @@ -553,42 +571,74 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid) unsigned long flags; struct wl12xx_vif *wlvif = wl->last_wlvif; struct sk_buff *skb = NULL; + int ac; + u8 low_prio_hlid = WL12XX_INVALID_LINK_ID; + + ac = wlcore_select_ac(wl); + if (ac < 0) + goto out; /* continue from last wlvif (round robin) */ if (wlvif) { wl12xx_for_each_wlvif_continue(wl, wlvif) { - skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid); - if (skb) { - wl->last_wlvif = wlvif; - break; - } + if (!wlvif->tx_queue_count[ac]) + continue; + + skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, + &low_prio_hlid); + if (!skb) + continue; + + wl->last_wlvif = wlvif; + break; } } /* dequeue from the system HLID before the restarting wlvif list */ if (!skb) { - skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); - *hlid = wl->system_hlid; + skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid, + ac, &low_prio_hlid); + if (skb) { + *hlid = wl->system_hlid; + wl->last_wlvif = NULL; + } } - /* do a new pass over the wlvif list */ + /* Do a new pass over the wlvif list. But no need to continue + * after last_wlvif. The previous pass should have found it. */ if (!skb) { wl12xx_for_each_wlvif(wl, wlvif) { - skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid); + if (!wlvif->tx_queue_count[ac]) + goto next; + + skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, + &low_prio_hlid); if (skb) { wl->last_wlvif = wlvif; break; } - /* - * No need to continue after last_wlvif. The previous - * pass should have found it. - */ +next: if (wlvif == wl->last_wlvif) break; } } + /* no high priority skbs found - but maybe a low priority one? 
*/ + if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) { + struct wl1271_link *lnk = &wl->links[low_prio_hlid]; + skb = wlcore_lnk_dequeue(wl, lnk, ac); + + WARN_ON(!skb); /* we checked this before */ + *hlid = low_prio_hlid; + + /* ensure proper round robin in the vif/link levels */ + wl->last_wlvif = lnk->wlvif; + if (lnk->wlvif) + lnk->wlvif->last_tx_hlid = low_prio_hlid; + + } + if (!skb && test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { int q; @@ -602,6 +652,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid) spin_unlock_irqrestore(&wl->wl_lock, flags); } +out: return skb; } @@ -623,6 +674,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, spin_lock_irqsave(&wl->wl_lock, flags); wl->tx_queue_count[q]++; + if (wlvif) + wlvif->tx_queue_count[q]++; spin_unlock_irqrestore(&wl->wl_lock, flags); } @@ -699,7 +752,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl) bool has_data = false; wlvif = NULL; - if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) + if (!wl12xx_is_dummy_packet(wl, skb)) wlvif = wl12xx_vif_to_data(info->control.vif); else hlid = wl->system_hlid; @@ -972,10 +1025,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) unsigned long flags; struct ieee80211_tx_info *info; int total[NUM_TX_QUEUES]; + struct wl1271_link *lnk = &wl->links[hlid]; for (i = 0; i < NUM_TX_QUEUES; i++) { total[i] = 0; - while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { + while ((skb = skb_dequeue(&lnk->tx_queue[i]))) { wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); if (!wl12xx_is_dummy_packet(wl, skb)) { @@ -990,8 +1044,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) } spin_lock_irqsave(&wl->wl_lock, flags); - for (i = 0; i < NUM_TX_QUEUES; i++) + for (i = 0; i < NUM_TX_QUEUES; i++) { wl->tx_queue_count[i] -= total[i]; + if (lnk->wlvif) + lnk->wlvif->tx_queue_count[i] -= total[i]; + } spin_unlock_irqrestore(&wl->wl_lock, flags); wl1271_handle_tx_low_watermark(wl); @@ -1004,16 +1061,18 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) /* TX failure */ for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) { - if (wlvif->bss_type == BSS_TYPE_AP_BSS) + if (wlvif->bss_type == BSS_TYPE_AP_BSS) { + /* this calls wl12xx_free_link */ wl1271_free_sta(wl, wlvif, i); - else - wlvif->sta.ba_rx_bitmap = 0; - - wl->links[i].allocated_pkts = 0; - wl->links[i].prev_freed_pkts = 0; + } else { + u8 hlid = i; + wl12xx_free_link(wl, wlvif, &hlid); + } } wlvif->last_tx_hlid = 0; + for (i = 0; i < NUM_TX_QUEUES; i++) + wlvif->tx_queue_count[i] = 0; } /* caller must hold wl->mutex and TX must be stopped */ void wl12xx_tx_reset(struct wl1271 *wl) @@ -1023,7 +1082,7 @@ void wl12xx_tx_reset(struct wl1271 *wl) struct ieee80211_tx_info *info; /* only reset the queues if something bad happened */ - if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) { + if (wl1271_tx_total_queue_count(wl) != 0) { for (i = 0; i < WL12XX_MAX_LINKS; i++) wl1271_tx_reset_link_queues(wl, i); @@ -1135,45 +1194,48 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) return BIT(__ffs(rate_set)); } +EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get); -void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue, - enum wlcore_queue_stop_reason reason) +void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue, enum wlcore_queue_stop_reason reason) { - bool stopped = !!wl->queue_stop_reasons[queue]; + int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); + bool 
stopped = !!wl->queue_stop_reasons[hwq]; /* queue should not be stopped for this reason */ - WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue])); + WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq])); if (stopped) return; - ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue)); + ieee80211_stop_queue(wl->hw, hwq); } -void wlcore_stop_queue(struct wl1271 *wl, u8 queue, +void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, enum wlcore_queue_stop_reason reason) { unsigned long flags; spin_lock_irqsave(&wl->wl_lock, flags); - wlcore_stop_queue_locked(wl, queue, reason); + wlcore_stop_queue_locked(wl, wlvif, queue, reason); spin_unlock_irqrestore(&wl->wl_lock, flags); } -void wlcore_wake_queue(struct wl1271 *wl, u8 queue, +void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, enum wlcore_queue_stop_reason reason) { unsigned long flags; + int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); spin_lock_irqsave(&wl->wl_lock, flags); /* queue should not be clear for this reason */ - WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue])); + WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq])); - if (wl->queue_stop_reasons[queue]) + if (wl->queue_stop_reasons[hwq]) goto out; - ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue)); + ieee80211_wake_queue(wl->hw, hwq); out: spin_unlock_irqrestore(&wl->wl_lock, flags); @@ -1183,48 +1245,74 @@ void wlcore_stop_queues(struct wl1271 *wl, enum wlcore_queue_stop_reason reason) { int i; + unsigned long flags; - for (i = 0; i < NUM_TX_QUEUES; i++) - wlcore_stop_queue(wl, i, reason); + spin_lock_irqsave(&wl->wl_lock, flags); + + /* mark all possible queues as stopped */ + for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++) + WARN_ON_ONCE(test_and_set_bit(reason, + &wl->queue_stop_reasons[i])); + + /* use the global version to make sure all vifs in mac80211 we don't + * know are stopped. + */ + ieee80211_stop_queues(wl->hw); + + spin_unlock_irqrestore(&wl->wl_lock, flags); } -EXPORT_SYMBOL_GPL(wlcore_stop_queues); void wlcore_wake_queues(struct wl1271 *wl, enum wlcore_queue_stop_reason reason) { int i; + unsigned long flags; - for (i = 0; i < NUM_TX_QUEUES; i++) - wlcore_wake_queue(wl, i, reason); + spin_lock_irqsave(&wl->wl_lock, flags); + + /* mark all possible queues as awake */ + for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++) + WARN_ON_ONCE(!test_and_clear_bit(reason, + &wl->queue_stop_reasons[i])); + + /* use the global version to make sure all vifs in mac80211 we don't + * know are woken up. 
+ */ + ieee80211_wake_queues(wl->hw); + + spin_unlock_irqrestore(&wl->wl_lock, flags); } -EXPORT_SYMBOL_GPL(wlcore_wake_queues); -void wlcore_reset_stopped_queues(struct wl1271 *wl) +bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason) { - int i; unsigned long flags; + bool stopped; spin_lock_irqsave(&wl->wl_lock, flags); - - for (i = 0; i < NUM_TX_QUEUES; i++) { - if (!wl->queue_stop_reasons[i]) - continue; - - wl->queue_stop_reasons[i] = 0; - ieee80211_wake_queue(wl->hw, - wl1271_tx_get_mac80211_queue(i)); - } - + stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue, + reason); spin_unlock_irqrestore(&wl->wl_lock, flags); + + return stopped; } -bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue, - enum wlcore_queue_stop_reason reason) +bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason) { - return test_bit(reason, &wl->queue_stop_reasons[queue]); + int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); + + WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock)); + return test_bit(reason, &wl->queue_stop_reasons[hwq]); } -bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue) +bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue) { - return !!wl->queue_stop_reasons[queue]; + int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); + + WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock)); + return !!wl->queue_stop_reasons[hwq]; } diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h index 349520d8b724..55aa4acf9105 100644 --- a/drivers/net/wireless/ti/wlcore/tx.h +++ b/drivers/net/wireless/ti/wlcore/tx.h @@ -207,19 +207,22 @@ static inline int wl1271_tx_get_queue(int queue) } } -static inline int wl1271_tx_get_mac80211_queue(int queue) +static inline +int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue) { + int mac_queue = wlvif->hw_queue_base; + switch (queue) { case CONF_TX_AC_VO: - return 0; + return mac_queue + 0; case CONF_TX_AC_VI: - return 1; + return mac_queue + 1; case CONF_TX_AC_BE: - return 2; + return mac_queue + 2; case CONF_TX_AC_BK: - return 3; + return mac_queue + 3; default: - return 2; + return mac_queue + 2; } } @@ -252,20 +255,26 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, unsigned int packet_length); void wl1271_free_tx_id(struct wl1271 *wl, int id); -void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue, - enum wlcore_queue_stop_reason reason); -void wlcore_stop_queue(struct wl1271 *wl, u8 queue, +void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue, enum wlcore_queue_stop_reason reason); +void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, enum wlcore_queue_stop_reason reason); -void wlcore_wake_queue(struct wl1271 *wl, u8 queue, +void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, enum wlcore_queue_stop_reason reason); void wlcore_stop_queues(struct wl1271 *wl, enum wlcore_queue_stop_reason reason); void wlcore_wake_queues(struct wl1271 *wl, enum wlcore_queue_stop_reason reason); -void wlcore_reset_stopped_queues(struct wl1271 *wl); -bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue, +bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 queue, enum 
wlcore_queue_stop_reason reason); -bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue); +bool +wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 queue, + enum wlcore_queue_stop_reason reason); +bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue); /* from main.c */ void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index c3884937c007..af9fecaefc30 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -37,6 +37,9 @@ */ #define WLCORE_NUM_MAC_ADDRESSES 3 +/* wl12xx/wl18xx maximum transmission power (in dBm) */ +#define WLCORE_MAX_TXPWR 25 + /* forward declaration */ struct wl1271_tx_hw_descr; enum wl_rx_buf_align; @@ -51,6 +54,9 @@ struct wlcore_ops { int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr, void *buf, size_t len); int (*ack_event)(struct wl1271 *wl); + int (*wait_for_event)(struct wl1271 *wl, enum wlcore_wait_event event, + bool *timeout); + int (*process_mailbox_events)(struct wl1271 *wl); u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks); void (*set_tx_desc_blocks)(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, @@ -82,12 +88,32 @@ struct wlcore_ops { int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir); int (*handle_static_data)(struct wl1271 *wl, struct wl1271_static_data *static_data); + int (*scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_scan_request *req); + int (*scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif); + int (*sched_scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_sched_scan_ies *ies); + void (*sched_scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif); int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem); int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key_conf); + int (*channel_switch)(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_channel_switch *ch_switch); u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len); + void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, u32 changed); + int (*set_peer_cap)(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid); + bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk); + bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk); }; enum wlcore_partitions { @@ -157,7 +183,6 @@ struct wl1271 { struct wl1271_if_operations *if_ops; - void (*set_power)(bool enable); int irq; spinlock_t wl_lock; @@ -202,6 +227,8 @@ struct wl1271 { unsigned long klv_templates_map[ BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)]; + u8 session_ids[WL12XX_MAX_LINKS]; + struct list_head wlvif_list; u8 sta_count; @@ -227,7 +254,8 @@ struct wl1271 { /* Frames scheduled for transmission, not handled yet */ int tx_queue_count[NUM_TX_QUEUES]; - unsigned long queue_stop_reasons[NUM_TX_QUEUES]; + unsigned long queue_stop_reasons[ + NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES]; /* Frames received, not handled yet by mac80211 */ struct sk_buff_head deferred_rx_queue; @@ -269,24 +297,30 @@ struct wl1271 { struct work_struct recovery_work; bool watchdog_recovery; + /* Reg domain last configuration */ + u32 
reg_ch_conf_last[2]; + /* Reg domain pending configuration */ + u32 reg_ch_conf_pending[2]; + /* Pointer that holds DMA-friendly block for the mailbox */ - struct event_mailbox *mbox; + void *mbox; /* The mbox event mask */ u32 event_mask; /* Mailbox pointers */ + u32 mbox_size; u32 mbox_ptr[2]; /* Are we currently scanning */ - struct ieee80211_vif *scan_vif; + struct wl12xx_vif *scan_wlvif; struct wl1271_scan scan; struct delayed_work scan_complete_work; - /* Connection loss work */ - struct delayed_work connection_loss_work; + struct ieee80211_vif *roc_vif; + struct delayed_work roc_complete_work; - bool sched_scanning; + struct wl12xx_vif *sched_vif; /* The current band */ enum ieee80211_band band; @@ -299,7 +333,7 @@ struct wl1271 { struct wl1271_stats stats; - __le32 buffer_32; + __le32 *buffer_32; u32 buffer_cmd; u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; @@ -314,6 +348,8 @@ struct wl1271 { bool enable_11a; + int recovery_count; + /* Most recently reported noise in dBm */ s8 noise; @@ -333,6 +369,12 @@ struct wl1271 { */ struct wl1271_link links[WL12XX_MAX_LINKS]; + /* number of currently active links */ + int active_link_count; + + /* Fast/slow links bitmap according to FW */ + u32 fw_fast_lnk_map; + /* AP-mode - a bitmap of links currently in PS mode according to FW */ u32 ap_fw_ps_map; @@ -367,6 +409,12 @@ struct wl1271 { const char *sr_fw_name; const char *mr_fw_name; + u8 scan_templ_id_2_4; + u8 scan_templ_id_5; + u8 sched_scan_templ_id_2_4; + u8 sched_scan_templ_id_5; + u8 max_channels_5; + /* per-chip-family private structure */ void *priv; @@ -408,20 +456,28 @@ struct wl1271 { /* the number of allocated MAC addresses in this chip */ int num_mac_addr; - /* the minimum FW version required for the driver to work */ - unsigned int min_fw_ver[NUM_FW_VER]; + /* minimum FW version required for the driver to work in single-role */ + unsigned int min_sr_fw_ver[NUM_FW_VER]; + + /* minimum FW version required for the driver to work in multi-role */ + unsigned int min_mr_fw_ver[NUM_FW_VER]; struct completion nvs_loading_complete; + + /* number of concurrent channels the HW supports */ + u32 num_channels; }; int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); int wlcore_remove(struct platform_device *pdev); -struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size); +struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, + u32 mbox_size); int wlcore_free_hw(struct wl1271 *wl); int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key_conf); +void wlcore_regdomain_config(struct wl1271 *wl); static inline void wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band, @@ -430,16 +486,27 @@ wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band, memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap)); } +/* Tell wlcore not to care about this element when checking the version */ +#define WLCORE_FW_VER_IGNORE -1 + static inline void wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, - unsigned int iftype, unsigned int major, - unsigned int subtype, unsigned int minor) + unsigned int iftype_sr, unsigned int major_sr, + unsigned int subtype_sr, unsigned int minor_sr, + unsigned int iftype_mr, unsigned int major_mr, + unsigned int subtype_mr, unsigned int minor_mr) { - wl->min_fw_ver[FW_VER_CHIP] = chip; - wl->min_fw_ver[FW_VER_IF_TYPE] = iftype; - wl->min_fw_ver[FW_VER_MAJOR] = major; - wl->min_fw_ver[FW_VER_SUBTYPE] = subtype; - 
wl->min_fw_ver[FW_VER_MINOR] = minor; + wl->min_sr_fw_ver[FW_VER_CHIP] = chip; + wl->min_sr_fw_ver[FW_VER_IF_TYPE] = iftype_sr; + wl->min_sr_fw_ver[FW_VER_MAJOR] = major_sr; + wl->min_sr_fw_ver[FW_VER_SUBTYPE] = subtype_sr; + wl->min_sr_fw_ver[FW_VER_MINOR] = minor_sr; + + wl->min_mr_fw_ver[FW_VER_CHIP] = chip; + wl->min_mr_fw_ver[FW_VER_IF_TYPE] = iftype_mr; + wl->min_mr_fw_ver[FW_VER_MAJOR] = major_mr; + wl->min_mr_fw_ver[FW_VER_SUBTYPE] = subtype_mr; + wl->min_mr_fw_ver[FW_VER_MINOR] = minor_mr; } /* Firmware image load chunk size */ @@ -450,6 +517,9 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, /* Each RX/TX transaction requires an end-of-transaction transfer */ #define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0) +/* the first start_role(sta) sometimes doesn't work on wl12xx */ +#define WLCORE_QUIRK_START_STA_FAILS BIT(1) + /* wl127x and SPI don't support SDIO block size alignment */ #define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2) @@ -462,9 +532,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, /* Older firmwares use an old NVS format */ #define WLCORE_QUIRK_LEGACY_NVS BIT(5) -/* Some firmwares may not support ELP */ -#define WLCORE_QUIRK_NO_ELP BIT(6) - /* pad only the last frame in the aggregate buffer */ #define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7) @@ -477,11 +544,11 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, /* separate probe response templates for one-shot and sched scans */ #define WLCORE_QUIRK_DUAL_PROBE_TMPL BIT(10) -/* TODO: move to the lower drivers when all usages are abstracted */ -#define CHIP_ID_1271_PG10 (0x4030101) -#define CHIP_ID_1271_PG20 (0x4030111) -#define CHIP_ID_1283_PG10 (0x05030101) -#define CHIP_ID_1283_PG20 (0x05030111) +/* Firmware requires reg domain configuration for active calibration */ +#define WLCORE_QUIRK_REGDOMAIN_CONF BIT(11) + +/* The FW only support a zero session id for AP */ +#define WLCORE_QUIRK_AP_ZERO_SESSION_ID BIT(12) /* TODO: move all these common registers and values elsewhere */ #define HW_ACCESS_ELP_CTRL_REG 0x1FFFC diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index 6678d4b18611..508f5b0f8a70 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -109,22 +109,11 @@ enum { NUM_FW_VER }; -#define FW_VER_CHIP_WL127X 6 -#define FW_VER_CHIP_WL128X 7 - -#define FW_VER_IF_TYPE_STA 1 -#define FW_VER_IF_TYPE_AP 2 - -#define FW_VER_MINOR_1_SPARE_STA_MIN 58 -#define FW_VER_MINOR_1_SPARE_AP_MIN 47 - -#define FW_VER_MINOR_FWLOG_STA_MIN 70 - struct wl1271_chip { u32 id; - char fw_ver_str[ETHTOOL_BUSINFO_LEN]; + char fw_ver_str[ETHTOOL_FWVERS_LEN]; unsigned int fw_ver[NUM_FW_VER]; - char phy_fw_ver_str[ETHTOOL_BUSINFO_LEN]; + char phy_fw_ver_str[ETHTOOL_FWVERS_LEN]; }; #define NUM_TX_QUEUES 4 @@ -141,7 +130,10 @@ struct wl_fw_packet_counters { /* Cumulative counter of released Voice memory blocks */ u8 tx_voice_released_blks; - u8 padding[3]; + /* Tx rate of the last transmitted packet */ + u8 tx_last_rate; + + u8 padding[2]; } __packed; /* FW status registers */ @@ -214,6 +206,11 @@ struct wl1271_if_operations { void (*set_block_size) (struct device *child, unsigned int blksz); }; +struct wlcore_platdev_data { + struct wl12xx_platform_data *pdata; + struct wl1271_if_operations *if_ops; +}; + #define MAX_NUM_KEYS 14 #define MAX_KEY_SIZE 32 @@ -260,6 +257,8 @@ enum wl12xx_vif_flags { WLVIF_FLAG_IN_USE, }; +struct wl12xx_vif; + struct wl1271_link { /* AP-mode - TX queue per AC in link */ struct 
sk_buff_head tx_queue[NUM_TX_QUEUES]; @@ -272,6 +271,9 @@ struct wl1271_link { /* bitmap of TIDs where RX BA sessions are active for this link */ u8 ba_bitmap; + + /* The wlvif this link belongs to. Might be null for global links */ + struct wl12xx_vif *wlvif; }; #define WL1271_MAX_RX_FILTERS 5 @@ -315,6 +317,7 @@ struct wl12xx_rx_filter { struct wl1271_station { u8 hlid; + bool in_connection; }; struct wl12xx_vif { @@ -332,7 +335,6 @@ struct wl12xx_vif { union { struct { u8 hlid; - u8 ba_rx_bitmap; u8 basic_rate_idx; u8 ap_rate_idx; @@ -341,6 +343,8 @@ struct wl12xx_vif { u8 klv_template_id; bool qos; + /* channel type we started the STA role with */ + enum nl80211_channel_type role_chan_type; } sta; struct { u8 global_hlid; @@ -362,6 +366,9 @@ struct wl12xx_vif { /* the hlid of the last transmitted skb */ int last_tx_hlid; + /* counters of packets per AC, across all links in the vif */ + int tx_queue_count[NUM_TX_QUEUES]; + unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; @@ -396,9 +403,6 @@ struct wl12xx_vif { /* Our association ID */ u16 aid; - /* Session counter for the chipset */ - int session_counter; - /* retry counter for PSM entries */ u8 psm_entry_retry; @@ -416,11 +420,28 @@ struct wl12xx_vif { bool ba_support; bool ba_allowed; + bool wmm_enabled; + /* Rx Streaming */ struct work_struct rx_streaming_enable_work; struct work_struct rx_streaming_disable_work; struct timer_list rx_streaming_timer; + struct delayed_work channel_switch_work; + struct delayed_work connection_loss_work; + + /* number of in connection stations */ + int inconn_count; + + /* + * This vif's queues are mapped to mac80211 HW queues as: + * VO - hw_queue_base + * VI - hw_queue_base + 1 + * BE - hw_queue_base + 2 + * BK - hw_queue_base + 3 + */ + int hw_queue_base; + /* * This struct must be last! * data that has to be saved acrossed reconfigs (e.g. 
recovery) @@ -443,6 +464,7 @@ struct wl12xx_vif { static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif) { + WARN_ON(!vif); return (struct wl12xx_vif *)vif->drv_priv; } diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index ef2b171e3514..7ef0b4a181e1 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -155,7 +155,6 @@ static int upload_code(struct usb_device *udev, */ p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL); if (!p) { - dev_err(&udev->dev, "out of memory\n"); r = -ENOMEM; goto error; } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b8c5193bd420..d98414168485 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif) static void xenvif_down(struct xenvif *vif) { disable_irq(vif->irq); + del_timer_sync(&vif->credit_timeout); xen_netbk_deschedule_xenvif(vif); xen_netbk_remove_xenvif(vif); } @@ -238,6 +239,8 @@ static const struct net_device_ops xenvif_netdev_ops = { .ndo_stop = xenvif_close, .ndo_change_mtu = xenvif_change_mtu, .ndo_fix_features = xenvif_fix_features, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, }; struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, @@ -363,8 +366,6 @@ void xenvif_disconnect(struct xenvif *vif) atomic_dec(&vif->refcnt); wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); - del_timer_sync(&vif->credit_timeout); - if (vif->irq) unbind_from_irqhandler(vif->irq, vif); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 2b9520c46e97..cd49ba949636 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -911,13 +911,13 @@ static int netbk_count_requests(struct xenvif *vif, if (frags >= work_to_do) { netdev_err(vif->dev, "Need more frags\n"); netbk_fatal_tx_err(vif); - return -frags; + return -ENODATA; } if (unlikely(frags >= MAX_SKB_FRAGS)) { netdev_err(vif->dev, "Too many frags\n"); netbk_fatal_tx_err(vif); - return -frags; + return -E2BIG; } memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), @@ -925,7 +925,7 @@ static int netbk_count_requests(struct xenvif *vif, if (txp->size > first->size) { netdev_err(vif->dev, "Frag is bigger than frame.\n"); netbk_fatal_tx_err(vif); - return -frags; + return -EIO; } first->size -= txp->size; @@ -935,7 +935,7 @@ static int netbk_count_requests(struct xenvif *vif, netdev_err(vif->dev, "txp->offset: %x, size: %u\n", txp->offset, txp->size); netbk_fatal_tx_err(vif); - return -frags; + return -EINVAL; } } while ((txp++)->flags & XEN_NETTXF_more_data); return frags; |
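
The xen-netback hunk above swaps the old `return -frags;` error convention in `netbk_count_requests()` for distinct negative errno values (`-ENODATA`, `-E2BIG`, `-EIO`, `-EINVAL`). A minimal standalone sketch of why that matters, using a hypothetical `count_items_*()` pair (not part of the driver) under the assumption that the caller only checks for a negative return: negating a running count can collapse an error into `0` and hide it, while a real errno keeps the failure unambiguous.

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical illustration only: contrasts the old "return -count"
	 * style with returning a distinct errno, as in the hunk above. */
	static int count_items_old(int work_to_do)
	{
		int count = 0;

		if (count >= work_to_do)	/* error hit before any item */
			return -count;		/* -0 == 0: caller sees "success" */
		return count;
	}

	static int count_items_new(int work_to_do)
	{
		int count = 0;

		if (count >= work_to_do)
			return -ENODATA;	/* unambiguously negative */
		return count;
	}

	int main(void)
	{
		/* With work_to_do == 0 the old style "succeeds" with 0 items,
		 * while the new style reports an error the caller can act on. */
		printf("old: %d, new: %d\n",
		       count_items_old(0), count_items_new(0));
		return 0;
	}

With specific errno values the caller's `if (ret < 0)` check behaves the same for every failure site, and each site can still be told apart when debugging.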